# -*- coding:utf-8 -*-
import random
from datetime import datetime
from typing import List

from flask import request, current_app
from flask_login import current_user
from flask_restful import Resource, fields, marshal, marshal_with, reqparse
from sqlalchemy import desc, asc
from werkzeug.exceptions import NotFound, Forbidden

import services
from controllers.console import api
from controllers.console.app.error import ProviderNotInitializeError, ProviderQuotaExceededError, \
    ProviderModelCurrentlyNotSupportError
from controllers.console.datasets.error import DocumentAlreadyFinishedError, InvalidActionError, \
    DocumentIndexingError, InvalidMetadataError, ArchivedDocumentImmutableError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.indexing_runner import IndexingRunner
from core.login.login import login_required
from core.model_providers.error import ProviderTokenNotInitError, QuotaExceededError, \
    ModelCurrentlyNotSupportError, LLMBadRequestError
from core.model_providers.model_factory import ModelFactory
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from libs.helper import TimestampField
from models.dataset import DatasetProcessRule, Dataset, Document, DocumentSegment
from models.model import UploadFile
from services.dataset_service import DocumentService, DatasetService
from tasks.add_document_to_index_task import add_document_to_index_task
from tasks.remove_document_from_index_task import remove_document_from_index_task

dataset_fields = {
    'id': fields.String,
    'name': fields.String,
    'description': fields.String,
    'permission': fields.String,
    'data_source_type': fields.String,
    'indexing_technique': fields.String,
    'created_by': fields.String,
    'created_at': TimestampField,
}

document_fields = {
    'id': fields.String,
    'position': fields.Integer,
    'data_source_type': fields.String,
    'data_source_info': fields.Raw(attribute='data_source_info_dict'),
    'dataset_process_rule_id': fields.String,
    'name': fields.String,
    'created_from': fields.String,
    'created_by': fields.String,
    'created_at': TimestampField,
    'tokens': fields.Integer,
    'indexing_status': fields.String,
    'error': fields.String,
    'enabled': fields.Boolean,
    'disabled_at': TimestampField,
    'disabled_by': fields.String,
    'archived': fields.Boolean,
    'display_status': fields.String,
    'word_count': fields.Integer,
    'hit_count': fields.Integer,
    'doc_form': fields.String,
}

document_with_segments_fields = {
    'id': fields.String,
    'position': fields.Integer,
    'data_source_type': fields.String,
    'data_source_info': fields.Raw(attribute='data_source_info_dict'),
    'dataset_process_rule_id': fields.String,
    'name': fields.String,
    'created_from': fields.String,
    'created_by': fields.String,
    'created_at': TimestampField,
    'tokens': fields.Integer,
    'indexing_status': fields.String,
    'error': fields.String,
    'enabled': fields.Boolean,
    'disabled_at': TimestampField,
    'disabled_by': fields.String,
    'archived': fields.Boolean,
    'display_status': fields.String,
    'word_count': fields.Integer,
    'hit_count': fields.Integer,
    'completed_segments': fields.Integer,
    'total_segments': fields.Integer
}

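# DocumentResource centralizes the lookups and permission checks shared by the
# document endpoints below: it 404s on a missing dataset or document, and 403s
# when the current user fails the dataset permission check or the document
# belongs to another tenant.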
class DocumentResource(Resource):
    def get_document(self, dataset_id: str, document_id: str) -> Document:
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        document = DocumentService.get_document(dataset_id, document_id)
        if not document:
            raise NotFound('Document not found.')

        if document.tenant_id != current_user.current_tenant_id:
            raise Forbidden('No permission.')

        return document

    def get_batch_documents(self, dataset_id: str, batch: str) -> List[Document]:
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        documents = DocumentService.get_batch_documents(dataset_id, batch)
        if not documents:
            raise NotFound('Documents not found.')

        return documents

class GetProcessRuleApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        req_data = request.args

        document_id = req_data.get('document_id')
        if document_id:
            document = Document.query.get_or_404(document_id)

            dataset = DatasetService.get_dataset(document.dataset_id)
            if not dataset:
                raise NotFound('Dataset not found.')

            try:
                DatasetService.check_dataset_permission(dataset, current_user)
            except services.errors.account.NoPermissionError as e:
                raise Forbidden(str(e))

            # get the latest process rule of the dataset
            dataset_process_rule = db.session.query(DatasetProcessRule) \
                .filter(DatasetProcessRule.dataset_id == document.dataset_id) \
                .order_by(DatasetProcessRule.created_at.desc()) \
                .limit(1) \
                .one_or_none()
            mode = dataset_process_rule.mode
            rules = dataset_process_rule.rules_dict
        else:
            mode = DocumentService.DEFAULT_RULES['mode']
            rules = DocumentService.DEFAULT_RULES['rules']

        return {
            'mode': mode,
            'rules': rules
        }

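# GET /datasets/<dataset_id>/documents supports pagination (page/limit), keyword
# search on the document name, sorting ('-created_at' by default, or 'hit_count'
# via an aggregated segment subquery), and an optional fetch flag that also
# returns per-document segment completion counts.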
class DatasetDocumentListApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id = str(dataset_id)
        page = request.args.get('page', default=1, type=int)
        limit = request.args.get('limit', default=20, type=int)
        search = request.args.get('keyword', default=None, type=str)
        sort = request.args.get('sort', default='-created_at', type=str)
        fetch = request.args.get('fetch', default=False, type=bool)

        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        query = Document.query.filter_by(
            dataset_id=str(dataset_id), tenant_id=current_user.current_tenant_id)

        if search:
            search = f'%{search}%'
            query = query.filter(Document.name.like(search))

        if sort.startswith('-'):
            sort_logic = desc
            sort = sort[1:]
        else:
            sort_logic = asc

        if sort == 'hit_count':
            sub_query = db.select(DocumentSegment.document_id,
                                  db.func.sum(DocumentSegment.hit_count).label("total_hit_count")) \
                .group_by(DocumentSegment.document_id) \
                .subquery()

            query = query.outerjoin(sub_query, sub_query.c.document_id == Document.id) \
                .order_by(sort_logic(db.func.coalesce(sub_query.c.total_hit_count, 0)))
        elif sort == 'created_at':
            query = query.order_by(sort_logic(Document.created_at))
        else:
            query = query.order_by(desc(Document.created_at))

        paginated_documents = query.paginate(
            page=page, per_page=limit, max_per_page=100, error_out=False)
        documents = paginated_documents.items

        if fetch:
            for document in documents:
                completed_segments = DocumentSegment.query.filter(DocumentSegment.completed_at.isnot(None),
                                                                  DocumentSegment.document_id == str(document.id),
                                                                  DocumentSegment.status != 're_segment').count()
                total_segments = DocumentSegment.query.filter(DocumentSegment.document_id == str(document.id),
                                                              DocumentSegment.status != 're_segment').count()
                document.completed_segments = completed_segments
                document.total_segments = total_segments
            data = marshal(documents, document_with_segments_fields)
        else:
            data = marshal(documents, document_fields)

        response = {
            'data': data,
            'has_more': len(documents) == limit,
            'limit': limit,
            'total': paginated_documents.total,
            'page': page
        }
        return response

    documents_and_batch_fields = {
        'documents': fields.List(fields.Nested(document_fields)),
        'batch': fields.String
    }

    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(documents_and_batch_fields)
    def post(self, dataset_id):
        dataset_id = str(dataset_id)

        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        # The current user's role in the tenant must be admin or owner.
        if current_user.current_tenant.current_role not in ['admin', 'owner']:
            raise Forbidden()

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        parser = reqparse.RequestParser()
        parser.add_argument('indexing_technique', type=str, choices=Dataset.INDEXING_TECHNIQUE_LIST, nullable=False,
                            location='json')
        parser.add_argument('data_source', type=dict, required=False, location='json')
        parser.add_argument('process_rule', type=dict, required=False, location='json')
        parser.add_argument('duplicate', type=bool, nullable=False, location='json')
        parser.add_argument('original_document_id', type=str, required=False, location='json')
        parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False,
                            location='json')
        parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
                            location='json')
        args = parser.parse_args()

        if not dataset.indexing_technique and not args['indexing_technique']:
            raise ValueError('indexing_technique is required.')

        # validate args
        DocumentService.document_create_args_validate(args)

        # check the embedding model setting before accepting documents
        try:
            ModelFactory.get_embedding_model(
                tenant_id=current_user.current_tenant_id,
                model_provider_name=dataset.embedding_model_provider,
                model_name=dataset.embedding_model
            )
        except LLMBadRequestError:
            raise ProviderNotInitializeError(
                "No Embedding Model available. Please configure a valid provider "
                "in Settings -> Model Provider.")
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)

        try:
            documents, batch = DocumentService.save_document_with_dataset_id(dataset, args, current_user)
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()

        return {
            'documents': documents,
            'batch': batch
        }

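# POST /datasets/init creates a dataset and its first batch of documents in a
# single call, returning the new dataset together with the created documents.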
class DatasetInitApi(Resource):
    dataset_and_document_fields = {
        'dataset': fields.Nested(dataset_fields),
        'documents': fields.List(fields.Nested(document_fields)),
        'batch': fields.String
    }

    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(dataset_and_document_fields)
    def post(self):
        # The current user's role in the tenant must be admin or owner.
        if current_user.current_tenant.current_role not in ['admin', 'owner']:
            raise Forbidden()

        parser = reqparse.RequestParser()
        parser.add_argument('indexing_technique', type=str, choices=Dataset.INDEXING_TECHNIQUE_LIST, required=True,
                            nullable=False, location='json')
        parser.add_argument('data_source', type=dict, required=True, nullable=True, location='json')
        parser.add_argument('process_rule', type=dict, required=True, nullable=True, location='json')
        parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False,
                            location='json')
        parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
                            location='json')
        args = parser.parse_args()

        try:
            ModelFactory.get_embedding_model(
                tenant_id=current_user.current_tenant_id
            )
        except LLMBadRequestError:
            raise ProviderNotInitializeError(
                "No Embedding Model available. Please configure a valid provider "
                "in Settings -> Model Provider.")

        # validate args
        DocumentService.document_create_args_validate(args)

        try:
            dataset, documents, batch = DocumentService.save_document_without_dataset_id(
                tenant_id=current_user.current_tenant_id,
                document_data=args,
                account=current_user
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()

        response = {
            'dataset': dataset,
            'documents': documents,
            'batch': batch
        }
        return response

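# The two endpoints below estimate indexing cost (tokens, total price, segment
# count, preview) for a single document or a whole upload batch before the
# actual indexing runs.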
class DocumentIndexingEstimateApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        if document.indexing_status in ['completed', 'error']:
            raise DocumentAlreadyFinishedError()

        data_process_rule = document.dataset_process_rule
        data_process_rule_dict = data_process_rule.to_dict()

        response = {
            "tokens": 0,
            "total_price": 0,
            "currency": "USD",
            "total_segments": 0,
            "preview": []
        }

        if document.data_source_type == 'upload_file':
            data_source_info = document.data_source_info_dict
            if data_source_info and 'upload_file_id' in data_source_info:
                file_id = data_source_info['upload_file_id']

                file = db.session.query(UploadFile).filter(
                    UploadFile.tenant_id == document.tenant_id,
                    UploadFile.id == file_id
                ).first()

                # raise error if file not found
                if not file:
                    raise NotFound('File not found.')

                indexing_runner = IndexingRunner()

                try:
                    response = indexing_runner.file_indexing_estimate(current_user.current_tenant_id, [file],
                                                                      data_process_rule_dict, None,
                                                                      'English', dataset_id)
                except LLMBadRequestError:
                    raise ProviderNotInitializeError(
                        "No Embedding Model available. Please configure a valid provider "
                        "in Settings -> Model Provider.")
                except ProviderTokenNotInitError as ex:
                    raise ProviderNotInitializeError(ex.description)

        return response

class DocumentBatchIndexingEstimateApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, batch):
        dataset_id = str(dataset_id)
        batch = str(batch)
        dataset = DatasetService.get_dataset(dataset_id)
        if dataset is None:
            raise NotFound("Dataset not found.")

        documents = self.get_batch_documents(dataset_id, batch)
        response = {
            "tokens": 0,
            "total_price": 0,
            "currency": "USD",
            "total_segments": 0,
            "preview": []
        }
        if not documents:
            return response

        data_process_rule = documents[0].dataset_process_rule
        data_process_rule_dict = data_process_rule.to_dict()

        info_list = []
        for document in documents:
            if document.indexing_status in ['completed', 'error']:
                raise DocumentAlreadyFinishedError()

            data_source_info = document.data_source_info_dict
            # format document files info
            if data_source_info and 'upload_file_id' in data_source_info:
                file_id = data_source_info['upload_file_id']
                info_list.append(file_id)
            # format document notion info
            elif data_source_info and 'notion_workspace_id' in data_source_info \
                    and 'notion_page_id' in data_source_info:
                pages = []
                page = {
                    'page_id': data_source_info['notion_page_id'],
                    'type': data_source_info['type']
                }
                pages.append(page)
                notion_info = {
                    'workspace_id': data_source_info['notion_workspace_id'],
                    'pages': pages
                }
                info_list.append(notion_info)

        if dataset.data_source_type == 'upload_file':
            file_details = db.session.query(UploadFile).filter(
                UploadFile.tenant_id == current_user.current_tenant_id,
                UploadFile.id.in_(info_list)
            ).all()

            if not file_details:
                raise NotFound("File not found.")

            indexing_runner = IndexingRunner()
            try:
                response = indexing_runner.file_indexing_estimate(current_user.current_tenant_id, file_details,
                                                                  data_process_rule_dict, None,
                                                                  'English', dataset_id)
            except LLMBadRequestError:
                raise ProviderNotInitializeError(
                    "No Embedding Model available. Please configure a valid provider "
                    "in Settings -> Model Provider.")
            except ProviderTokenNotInitError as ex:
                raise ProviderNotInitializeError(ex.description)
        elif dataset.data_source_type == 'notion_import':
            indexing_runner = IndexingRunner()
            try:
                response = indexing_runner.notion_indexing_estimate(current_user.current_tenant_id,
                                                                    info_list,
                                                                    data_process_rule_dict,
                                                                    None, 'English', dataset_id)
            except LLMBadRequestError:
                raise ProviderNotInitializeError(
                    "No Embedding Model available. Please configure a valid provider "
                    "in Settings -> Model Provider.")
            except ProviderTokenNotInitError as ex:
                raise ProviderNotInitializeError(ex.description)
        else:
            raise ValueError('Data source type not supported.')
        return response

class DocumentBatchIndexingStatusApi(DocumentResource):
    document_status_fields = {
        'id': fields.String,
        'indexing_status': fields.String,
        'processing_started_at': TimestampField,
        'parsing_completed_at': TimestampField,
        'cleaning_completed_at': TimestampField,
        'splitting_completed_at': TimestampField,
        'completed_at': TimestampField,
        'paused_at': TimestampField,
        'error': fields.String,
        'stopped_at': TimestampField,
        'completed_segments': fields.Integer,
        'total_segments': fields.Integer,
    }

    document_status_fields_list = {
        'data': fields.List(fields.Nested(document_status_fields))
    }

    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, batch):
        dataset_id = str(dataset_id)
        batch = str(batch)
        documents = self.get_batch_documents(dataset_id, batch)
        documents_status = []
        for document in documents:
            completed_segments = DocumentSegment.query.filter(DocumentSegment.completed_at.isnot(None),
                                                              DocumentSegment.document_id == str(document.id),
                                                              DocumentSegment.status != 're_segment').count()
            total_segments = DocumentSegment.query.filter(DocumentSegment.document_id == str(document.id),
                                                          DocumentSegment.status != 're_segment').count()
            document.completed_segments = completed_segments
            document.total_segments = total_segments
            if document.is_paused:
                document.indexing_status = 'paused'
            documents_status.append(marshal(document, self.document_status_fields))
        data = {
            'data': documents_status
        }
        return data

class DocumentIndexingStatusApi(DocumentResource):
    document_status_fields = {
        'id': fields.String,
        'indexing_status': fields.String,
        'processing_started_at': TimestampField,
        'parsing_completed_at': TimestampField,
        'cleaning_completed_at': TimestampField,
        'splitting_completed_at': TimestampField,
        'completed_at': TimestampField,
        'paused_at': TimestampField,
        'error': fields.String,
        'stopped_at': TimestampField,
        'completed_segments': fields.Integer,
        'total_segments': fields.Integer,
    }

    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        completed_segments = DocumentSegment.query \
            .filter(DocumentSegment.completed_at.isnot(None),
                    DocumentSegment.document_id == str(document_id),
                    DocumentSegment.status != 're_segment') \
            .count()
        total_segments = DocumentSegment.query \
            .filter(DocumentSegment.document_id == str(document_id),
                    DocumentSegment.status != 're_segment') \
            .count()

        document.completed_segments = completed_segments
        document.total_segments = total_segments
        if document.is_paused:
            document.indexing_status = 'paused'
        return marshal(document, self.document_status_fields)

class DocumentDetailApi(DocumentResource):
    METADATA_CHOICES = {'all', 'only', 'without'}

    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        metadata = request.args.get('metadata', 'all')
        if metadata not in self.METADATA_CHOICES:
            raise InvalidMetadataError(f'Invalid metadata value: {metadata}')

        if metadata == 'only':
            response = {
                'id': document.id,
                'doc_type': document.doc_type,
                'doc_metadata': document.doc_metadata
            }
        elif metadata == 'without':
            process_rules = DatasetService.get_process_rules(dataset_id)
            data_source_info = document.data_source_detail_dict
            response = {
                'id': document.id,
                'position': document.position,
                'data_source_type': document.data_source_type,
                'data_source_info': data_source_info,
                'dataset_process_rule_id': document.dataset_process_rule_id,
                'dataset_process_rule': process_rules,
                'name': document.name,
                'created_from': document.created_from,
                'created_by': document.created_by,
                'created_at': document.created_at.timestamp(),
                'tokens': document.tokens,
                'indexing_status': document.indexing_status,
                'completed_at': int(document.completed_at.timestamp()) if document.completed_at else None,
                'updated_at': int(document.updated_at.timestamp()) if document.updated_at else None,
                'indexing_latency': document.indexing_latency,
                'error': document.error,
                'enabled': document.enabled,
                'disabled_at': int(document.disabled_at.timestamp()) if document.disabled_at else None,
                'disabled_by': document.disabled_by,
                'archived': document.archived,
                'segment_count': document.segment_count,
                'average_segment_length': document.average_segment_length,
                'hit_count': document.hit_count,
                'display_status': document.display_status,
                'doc_form': document.doc_form
            }
        else:
            process_rules = DatasetService.get_process_rules(dataset_id)
            data_source_info = document.data_source_detail_dict
            response = {
                'id': document.id,
                'position': document.position,
                'data_source_type': document.data_source_type,
                'data_source_info': data_source_info,
                'dataset_process_rule_id': document.dataset_process_rule_id,
                'dataset_process_rule': process_rules,
                'name': document.name,
                'created_from': document.created_from,
                'created_by': document.created_by,
                'created_at': document.created_at.timestamp(),
                'tokens': document.tokens,
                'indexing_status': document.indexing_status,
                'completed_at': int(document.completed_at.timestamp()) if document.completed_at else None,
                'updated_at': int(document.updated_at.timestamp()) if document.updated_at else None,
                'indexing_latency': document.indexing_latency,
                'error': document.error,
                'enabled': document.enabled,
                'disabled_at': int(document.disabled_at.timestamp()) if document.disabled_at else None,
                'disabled_by': document.disabled_by,
                'archived': document.archived,
                'doc_type': document.doc_type,
                'doc_metadata': document.doc_metadata,
                'segment_count': document.segment_count,
                'average_segment_length': document.average_segment_length,
                'hit_count': document.hit_count,
                'display_status': document.display_status,
                'doc_form': document.doc_form
            }

        return response, 200

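# PATCH .../processing/<action> pauses or resumes indexing in place: 'pause' is
# only valid while the document is indexing, 'resume' only from the paused or
# error state, and both require an admin or owner role.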
class DocumentProcessingApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def patch(self, dataset_id, document_id, action):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        # The current user's role in the tenant must be admin or owner.
        if current_user.current_tenant.current_role not in ['admin', 'owner']:
            raise Forbidden()

        if action == "pause":
            if document.indexing_status != "indexing":
                raise InvalidActionError('Document not in indexing state.')

            document.paused_by = current_user.id
            document.paused_at = datetime.utcnow()
            document.is_paused = True
            db.session.commit()
        elif action == "resume":
            if document.indexing_status not in ["paused", "error"]:
                raise InvalidActionError('Document not in paused or error state.')

            document.paused_by = None
            document.paused_at = None
            document.is_paused = False
            db.session.commit()
        else:
            raise InvalidActionError()

        return {'result': 'success'}, 200

class DocumentDeleteApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def delete(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        try:
            DocumentService.delete_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError('Cannot delete document during indexing.')

        return {'result': 'success'}, 204

class DocumentMetadataApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def put(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        req_data = request.get_json()

        doc_type = req_data.get('doc_type')
        doc_metadata = req_data.get('doc_metadata')

        # The current user's role in the tenant must be admin or owner.
        if current_user.current_tenant.current_role not in ['admin', 'owner']:
            raise Forbidden()

        if doc_type is None or doc_metadata is None:
            raise ValueError('Both doc_type and doc_metadata must be provided.')

        if doc_type not in DocumentService.DOCUMENT_METADATA_SCHEMA:
            raise ValueError('Invalid doc_type.')

        if not isinstance(doc_metadata, dict):
            raise ValueError('doc_metadata must be a dictionary.')

        metadata_schema = DocumentService.DOCUMENT_METADATA_SCHEMA[doc_type]

        document.doc_metadata = {}

        if doc_type == 'others':
            document.doc_metadata = doc_metadata
        else:
            # keep only keys that appear in the schema with the expected type
            for key, value_type in metadata_schema.items():
                value = doc_metadata.get(key)
                if value is not None and isinstance(value, value_type):
                    document.doc_metadata[key] = value

        document.doc_type = doc_type
        document.updated_at = datetime.utcnow()
        db.session.commit()

        return {'result': 'success', 'message': 'Document metadata updated.'}, 200

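# PATCH .../status/<action> flips document state: 'enable'/'disable' add or
# remove the document from the index asynchronously, 'archive'/'un_archive'
# toggle archival (un_archive re-checks the tenant document limit on CLOUD).
# A short-lived Redis key guards against re-indexing the same document
# concurrently.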
class DocumentStatusApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def patch(self, dataset_id, document_id, action):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        # The current user's role in the tenant must be admin or owner.
        if current_user.current_tenant.current_role not in ['admin', 'owner']:
            raise Forbidden()

        indexing_cache_key = 'document_{}_indexing'.format(document.id)
        cache_result = redis_client.get(indexing_cache_key)
        if cache_result is not None:
            raise InvalidActionError("Document is being indexed, please try again later")

        if action == "enable":
            if document.enabled:
                raise InvalidActionError('Document already enabled.')

            document.enabled = True
            document.disabled_at = None
            document.disabled_by = None
            document.updated_at = datetime.utcnow()
            db.session.commit()

            # Set cache to prevent indexing the same document multiple times
            redis_client.setex(indexing_cache_key, 600, 1)

            add_document_to_index_task.delay(document_id)

            return {'result': 'success'}, 200
        elif action == "disable":
            if not document.completed_at or document.indexing_status != 'completed':
                raise InvalidActionError('Document is not completed.')
            if not document.enabled:
                raise InvalidActionError('Document already disabled.')

            document.enabled = False
            document.disabled_at = datetime.utcnow()
            document.disabled_by = current_user.id
            document.updated_at = datetime.utcnow()
            db.session.commit()

            # Set cache to prevent indexing the same document multiple times
            redis_client.setex(indexing_cache_key, 600, 1)

            remove_document_from_index_task.delay(document_id)

            return {'result': 'success'}, 200
        elif action == "archive":
            if document.archived:
                raise InvalidActionError('Document already archived.')

            document.archived = True
            document.archived_at = datetime.utcnow()
            document.archived_by = current_user.id
            document.updated_at = datetime.utcnow()
            db.session.commit()

            if document.enabled:
                # Set cache to prevent indexing the same document multiple times
                redis_client.setex(indexing_cache_key, 600, 1)

                remove_document_from_index_task.delay(document_id)

            return {'result': 'success'}, 200
        elif action == "un_archive":
            if not document.archived:
                raise InvalidActionError('Document is not archived.')

            # check the tenant document limit
            if current_app.config['EDITION'] == 'CLOUD':
                documents_count = DocumentService.get_tenant_documents_count()
                total_count = documents_count + 1
                tenant_document_count = int(current_app.config['TENANT_DOCUMENT_COUNT'])
                if total_count > tenant_document_count:
                    raise ValueError(f"You have exceeded the document limit of {tenant_document_count}.")

            document.archived = False
            document.archived_at = None
            document.archived_by = None
            document.updated_at = datetime.utcnow()
            db.session.commit()

            # Set cache to prevent indexing the same document multiple times
            redis_client.setex(indexing_cache_key, 600, 1)

            add_document_to_index_task.delay(document_id)

            return {'result': 'success'}, 200
        else:
            raise InvalidActionError()

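# The pause/resume endpoints below are the dedicated counterparts of
# DocumentProcessingApi: they delegate the state checks to DocumentService and
# refuse to touch archived documents.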
class DocumentPauseApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def patch(self, dataset_id, document_id):
        """pause document."""
        dataset_id = str(dataset_id)
        document_id = str(document_id)

        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        document = DocumentService.get_document(dataset.id, document_id)

        # 404 if document not found
        if document is None:
            raise NotFound("Document not found.")

        # 403 if document is archived
        if DocumentService.check_archived(document):
            raise ArchivedDocumentImmutableError()

        try:
            # pause document
            DocumentService.pause_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError('Cannot pause completed document.')

        return {'result': 'success'}, 204

class DocumentRecoverApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def patch(self, dataset_id, document_id):
        """recover document."""
        dataset_id = str(dataset_id)
        document_id = str(document_id)

        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        document = DocumentService.get_document(dataset.id, document_id)

        # 404 if document not found
        if document is None:
            raise NotFound("Document not found.")

        # 403 if document is archived
        if DocumentService.check_archived(document):
            raise ArchivedDocumentImmutableError()

        try:
            # recover (resume) document
            DocumentService.recover_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError('Document is not in paused status.')

        return {'result': 'success'}, 204

class DocumentLimitApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        """get the tenant document limit"""
        documents_count = DocumentService.get_tenant_documents_count()
        tenant_document_count = int(current_app.config['TENANT_DOCUMENT_COUNT'])

        return {
            'documents_count': documents_count,
            'documents_limit': tenant_document_count
        }, 200

api.add_resource(GetProcessRuleApi, '/datasets/process-rule')
api.add_resource(DatasetDocumentListApi,
                 '/datasets/<uuid:dataset_id>/documents')
api.add_resource(DatasetInitApi,
                 '/datasets/init')
api.add_resource(DocumentIndexingEstimateApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/indexing-estimate')
api.add_resource(DocumentBatchIndexingEstimateApi,
                 '/datasets/<uuid:dataset_id>/batch/<string:batch>/indexing-estimate')
api.add_resource(DocumentBatchIndexingStatusApi,
                 '/datasets/<uuid:dataset_id>/batch/<string:batch>/indexing-status')
api.add_resource(DocumentIndexingStatusApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/indexing-status')
api.add_resource(DocumentDetailApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>')
api.add_resource(DocumentProcessingApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/<string:action>')
api.add_resource(DocumentDeleteApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>')
api.add_resource(DocumentMetadataApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/metadata')
api.add_resource(DocumentStatusApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/status/<string:action>')
api.add_resource(DocumentPauseApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/pause')
api.add_resource(DocumentRecoverApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/resume')
api.add_resource(DocumentLimitApi, '/datasets/limit')
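
# A minimal usage sketch. The paths above are relative to the console API
# blueprint; the "/console/api" prefix, IDs, and payloads below are
# hypothetical, and every call assumes a logged-in, initialized account:
#
#   GET   /console/api/datasets/process-rule?document_id=<uuid>
#   GET   /console/api/datasets/<dataset_id>/documents?page=1&limit=20&fetch=true
#   POST  /console/api/datasets/init
#         {"indexing_technique": "high_quality", "data_source": {...}, "process_rule": {...}}
#   PATCH /console/api/datasets/<dataset_id>/documents/<document_id>/status/disable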