datasets_document.py

# -*- coding:utf-8 -*-
import random
from datetime import datetime
from typing import List

from flask import request
from flask_login import login_required, current_user
from flask_restful import Resource, fields, marshal, marshal_with, reqparse
from sqlalchemy import desc, asc
from werkzeug.exceptions import NotFound, Forbidden

import services
from controllers.console import api
from controllers.console.app.error import ProviderNotInitializeError, ProviderQuotaExceededError, \
    ProviderModelCurrentlyNotSupportError
from controllers.console.datasets.error import DocumentAlreadyFinishedError, InvalidActionError, \
    DocumentIndexingError, InvalidMetadataError, ArchivedDocumentImmutableError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.indexing_runner import IndexingRunner
from core.model_providers.error import ProviderTokenNotInitError, QuotaExceededError, \
    ModelCurrentlyNotSupportError, LLMBadRequestError
from core.model_providers.model_factory import ModelFactory
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from libs.helper import TimestampField
from models.dataset import DatasetProcessRule, Dataset
from models.dataset import Document, DocumentSegment
from models.model import UploadFile
from services.dataset_service import DocumentService, DatasetService
from tasks.add_document_to_index_task import add_document_to_index_task
from tasks.remove_document_from_index_task import remove_document_from_index_task
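

# Field maps used by flask_restful's marshal()/marshal_with() to serialize API responses.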
dataset_fields = {
    'id': fields.String,
    'name': fields.String,
    'description': fields.String,
    'permission': fields.String,
    'data_source_type': fields.String,
    'indexing_technique': fields.String,
    'created_by': fields.String,
    'created_at': TimestampField,
}

document_fields = {
    'id': fields.String,
    'position': fields.Integer,
    'data_source_type': fields.String,
    'data_source_info': fields.Raw(attribute='data_source_info_dict'),
    'dataset_process_rule_id': fields.String,
    'name': fields.String,
    'created_from': fields.String,
    'created_by': fields.String,
    'created_at': TimestampField,
    'tokens': fields.Integer,
    'indexing_status': fields.String,
    'error': fields.String,
    'enabled': fields.Boolean,
    'disabled_at': TimestampField,
    'disabled_by': fields.String,
    'archived': fields.Boolean,
    'display_status': fields.String,
    'word_count': fields.Integer,
    'hit_count': fields.Integer,
    'doc_form': fields.String,
}

document_with_segments_fields = {
    'id': fields.String,
    'position': fields.Integer,
    'data_source_type': fields.String,
    'data_source_info': fields.Raw(attribute='data_source_info_dict'),
    'dataset_process_rule_id': fields.String,
    'name': fields.String,
    'created_from': fields.String,
    'created_by': fields.String,
    'created_at': TimestampField,
    'tokens': fields.Integer,
    'indexing_status': fields.String,
    'error': fields.String,
    'enabled': fields.Boolean,
    'disabled_at': TimestampField,
    'disabled_by': fields.String,
    'archived': fields.Boolean,
    'display_status': fields.String,
    'word_count': fields.Integer,
    'hit_count': fields.Integer,
    'completed_segments': fields.Integer,
    'total_segments': fields.Integer
}
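

# Base resource: shared lookups that enforce dataset permission and tenant
# ownership before any document is handed to a request handler.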
class DocumentResource(Resource):
    def get_document(self, dataset_id: str, document_id: str) -> Document:
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        document = DocumentService.get_document(dataset_id, document_id)

        if not document:
            raise NotFound('Document not found.')

        if document.tenant_id != current_user.current_tenant_id:
            raise Forbidden('No permission.')

        return document

    def get_batch_documents(self, dataset_id: str, batch: str) -> List[Document]:
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        documents = DocumentService.get_batch_documents(dataset_id, batch)

        if not documents:
            raise NotFound('Documents not found.')

        return documents
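

# GET /datasets/process-rule: the latest process rule for a document, or the
# service-level defaults when no document_id is given.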
class GetProcessRuleApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        req_data = request.args

        document_id = req_data.get('document_id')
        if document_id:
            # get document by id
            document = Document.query.get_or_404(document_id)

            dataset = DatasetService.get_dataset(document.dataset_id)

            if not dataset:
                raise NotFound('Dataset not found.')

            try:
                DatasetService.check_dataset_permission(dataset, current_user)
            except services.errors.account.NoPermissionError as e:
                raise Forbidden(str(e))

            # get the latest process rule
            dataset_process_rule = db.session.query(DatasetProcessRule). \
                filter(DatasetProcessRule.dataset_id == document.dataset_id). \
                order_by(DatasetProcessRule.created_at.desc()). \
                limit(1). \
                one_or_none()
            if dataset_process_rule:
                mode = dataset_process_rule.mode
                rules = dataset_process_rule.rules_dict
            else:
                # guard against one_or_none() returning None: fall back to defaults
                mode = DocumentService.DEFAULT_RULES['mode']
                rules = DocumentService.DEFAULT_RULES['rules']
        else:
            mode = DocumentService.DEFAULT_RULES['mode']
            rules = DocumentService.DEFAULT_RULES['rules']

        return {
            'mode': mode,
            'rules': rules
        }
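

# /datasets/<uuid:dataset_id>/documents: GET lists documents with optional
# keyword search, sorting and segment counts; POST creates documents.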
class DatasetDocumentListApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id = str(dataset_id)
        page = request.args.get('page', default=1, type=int)
        limit = request.args.get('limit', default=20, type=int)
        search = request.args.get('keyword', default=None, type=str)
        sort = request.args.get('sort', default='-created_at', type=str)
        fetch = request.args.get('fetch', default=False, type=bool)

        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        query = Document.query.filter_by(
            dataset_id=str(dataset_id), tenant_id=current_user.current_tenant_id)

        if search:
            search = f'%{search}%'
            query = query.filter(Document.name.like(search))

        if sort.startswith('-'):
            sort_logic = desc
            sort = sort[1:]
        else:
            sort_logic = asc

        if sort == 'hit_count':
            sub_query = db.select(DocumentSegment.document_id,
                                  db.func.sum(DocumentSegment.hit_count).label("total_hit_count")) \
                .group_by(DocumentSegment.document_id) \
                .subquery()

            query = query.outerjoin(sub_query, sub_query.c.document_id == Document.id) \
                .order_by(sort_logic(db.func.coalesce(sub_query.c.total_hit_count, 0)))
        elif sort == 'created_at':
            query = query.order_by(sort_logic(Document.created_at))
        else:
            query = query.order_by(desc(Document.created_at))

        paginated_documents = query.paginate(
            page=page, per_page=limit, max_per_page=100, error_out=False)
        documents = paginated_documents.items
        if fetch:
            for document in documents:
                completed_segments = DocumentSegment.query.filter(DocumentSegment.completed_at.isnot(None),
                                                                  DocumentSegment.document_id == str(document.id),
                                                                  DocumentSegment.status != 're_segment').count()
                total_segments = DocumentSegment.query.filter(DocumentSegment.document_id == str(document.id),
                                                              DocumentSegment.status != 're_segment').count()
                document.completed_segments = completed_segments
                document.total_segments = total_segments
            data = marshal(documents, document_with_segments_fields)
        else:
            data = marshal(documents, document_fields)
        response = {
            'data': data,
            'has_more': len(documents) == limit,
            'limit': limit,
            'total': paginated_documents.total,
            'page': page
        }

        return response
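
    # POST: create documents in this dataset. The embedding model is verified
    # up front so a misconfigured provider fails before anything is persisted.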
    documents_and_batch_fields = {
        'documents': fields.List(fields.Nested(document_fields)),
        'batch': fields.String
    }

    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(documents_and_batch_fields)
    def post(self, dataset_id):
        dataset_id = str(dataset_id)

        dataset = DatasetService.get_dataset(dataset_id)

        if not dataset:
            raise NotFound('Dataset not found.')

        # The role of the current user in the ta table must be admin or owner
        if current_user.current_tenant.current_role not in ['admin', 'owner']:
            raise Forbidden()

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        parser = reqparse.RequestParser()
        parser.add_argument('indexing_technique', type=str, choices=Dataset.INDEXING_TECHNIQUE_LIST, nullable=False,
                            location='json')
        parser.add_argument('data_source', type=dict, required=False, location='json')
        parser.add_argument('process_rule', type=dict, required=False, location='json')
        parser.add_argument('duplicate', type=bool, nullable=False, location='json')
        parser.add_argument('original_document_id', type=str, required=False, location='json')
        parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False,
                            location='json')
        parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
                            location='json')
        args = parser.parse_args()

        if not dataset.indexing_technique and not args['indexing_technique']:
            raise ValueError('indexing_technique is required.')

        # validate args
        DocumentService.document_create_args_validate(args)

        # check embedding model setting
        try:
            ModelFactory.get_embedding_model(
                tenant_id=current_user.current_tenant_id,
                model_provider_name=dataset.embedding_model_provider,
                model_name=dataset.embedding_model
            )
        except LLMBadRequestError:
            raise ProviderNotInitializeError(
                "No Embedding Model available. Please configure a valid provider "
                "in the Settings -> Model Provider.")
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)

        try:
            documents, batch = DocumentService.save_document_with_dataset_id(dataset, args, current_user)
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()

        return {
            'documents': documents,
            'batch': batch
        }
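

# POST /datasets/init: create a dataset and its first batch of documents in one call.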
class DatasetInitApi(Resource):
    dataset_and_document_fields = {
        'dataset': fields.Nested(dataset_fields),
        'documents': fields.List(fields.Nested(document_fields)),
        'batch': fields.String
    }

    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(dataset_and_document_fields)
    def post(self):
        # The role of the current user in the ta table must be admin or owner
        if current_user.current_tenant.current_role not in ['admin', 'owner']:
            raise Forbidden()

        parser = reqparse.RequestParser()
        parser.add_argument('indexing_technique', type=str, choices=Dataset.INDEXING_TECHNIQUE_LIST, required=True,
                            nullable=False, location='json')
        parser.add_argument('data_source', type=dict, required=True, nullable=True, location='json')
        parser.add_argument('process_rule', type=dict, required=True, nullable=True, location='json')
        parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False,
                            location='json')
        parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
                            location='json')
        args = parser.parse_args()

        try:
            ModelFactory.get_embedding_model(
                tenant_id=current_user.current_tenant_id
            )
        except LLMBadRequestError:
            raise ProviderNotInitializeError(
                "No Embedding Model available. Please configure a valid provider "
                "in the Settings -> Model Provider.")

        # validate args
        DocumentService.document_create_args_validate(args)

        try:
            dataset, documents, batch = DocumentService.save_document_without_dataset_id(
                tenant_id=current_user.current_tenant_id,
                document_data=args,
                account=current_user
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()

        response = {
            'dataset': dataset,
            'documents': documents,
            'batch': batch
        }

        return response
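

# Pre-indexing estimate (tokens, price, segment preview) for a single
# upload_file document.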
class DocumentIndexingEstimateApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        if document.indexing_status in ['completed', 'error']:
            raise DocumentAlreadyFinishedError()

        data_process_rule = document.dataset_process_rule
        data_process_rule_dict = data_process_rule.to_dict()

        response = {
            "tokens": 0,
            "total_price": 0,
            "currency": "USD",
            "total_segments": 0,
            "preview": []
        }

        if document.data_source_type == 'upload_file':
            data_source_info = document.data_source_info_dict
            if data_source_info and 'upload_file_id' in data_source_info:
                file_id = data_source_info['upload_file_id']

                file = db.session.query(UploadFile).filter(
                    UploadFile.tenant_id == document.tenant_id,
                    UploadFile.id == file_id
                ).first()

                # raise error if file not found
                if not file:
                    raise NotFound('File not found.')

                indexing_runner = IndexingRunner()

                try:
                    response = indexing_runner.file_indexing_estimate(current_user.current_tenant_id, [file],
                                                                      data_process_rule_dict, None, dataset_id)
                except LLMBadRequestError:
                    raise ProviderNotInitializeError(
                        "No Embedding Model available. Please configure a valid provider "
                        "in the Settings -> Model Provider.")
                except ProviderTokenNotInitError as ex:
                    raise ProviderNotInitializeError(ex.description)

        return response
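

# Batch variant of the indexing estimate, covering file uploads and Notion imports.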
class DocumentBatchIndexingEstimateApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, batch):
        dataset_id = str(dataset_id)
        batch = str(batch)
        dataset = DatasetService.get_dataset(dataset_id)
        if dataset is None:
            raise NotFound("Dataset not found.")
        documents = self.get_batch_documents(dataset_id, batch)
        response = {
            "tokens": 0,
            "total_price": 0,
            "currency": "USD",
            "total_segments": 0,
            "preview": []
        }
        if not documents:
            return response
        data_process_rule = documents[0].dataset_process_rule
        data_process_rule_dict = data_process_rule.to_dict()
        info_list = []
        for document in documents:
            if document.indexing_status in ['completed', 'error']:
                raise DocumentAlreadyFinishedError()
            data_source_info = document.data_source_info_dict
            # format document files info
            if data_source_info and 'upload_file_id' in data_source_info:
                file_id = data_source_info['upload_file_id']
                info_list.append(file_id)
            # format document notion info
            elif data_source_info and 'notion_workspace_id' in data_source_info and \
                    'notion_page_id' in data_source_info:
                pages = []
                page = {
                    'page_id': data_source_info['notion_page_id'],
                    'type': data_source_info['type']
                }
                pages.append(page)
                notion_info = {
                    'workspace_id': data_source_info['notion_workspace_id'],
                    'pages': pages
                }
                info_list.append(notion_info)

        if dataset.data_source_type == 'upload_file':
            file_details = db.session.query(UploadFile).filter(
                UploadFile.tenant_id == current_user.current_tenant_id,
                UploadFile.id.in_(info_list)  # use SQLAlchemy's in_(); plain Python `in` is not a valid column filter
            ).all()

            # .all() returns a list (never None), so check for emptiness
            if not file_details:
                raise NotFound("File not found.")

            indexing_runner = IndexingRunner()
            try:
                response = indexing_runner.file_indexing_estimate(current_user.current_tenant_id, file_details,
                                                                  data_process_rule_dict, None, dataset_id)
            except LLMBadRequestError:
                raise ProviderNotInitializeError(
                    "No Embedding Model available. Please configure a valid provider "
                    "in the Settings -> Model Provider.")
            except ProviderTokenNotInitError as ex:
                raise ProviderNotInitializeError(ex.description)
        elif dataset.data_source_type == 'notion_import':
            indexing_runner = IndexingRunner()
            try:
                response = indexing_runner.notion_indexing_estimate(current_user.current_tenant_id,
                                                                    info_list,
                                                                    data_process_rule_dict,
                                                                    None, dataset_id)
            except LLMBadRequestError:
                raise ProviderNotInitializeError(
                    "No Embedding Model available. Please configure a valid provider "
                    "in the Settings -> Model Provider.")
            except ProviderTokenNotInitError as ex:
                raise ProviderNotInitializeError(ex.description)
        else:
            raise ValueError('Data source type not supported.')
        return response
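

# Per-document indexing progress for every document in an upload batch.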
class DocumentBatchIndexingStatusApi(DocumentResource):
    document_status_fields = {
        'id': fields.String,
        'indexing_status': fields.String,
        'processing_started_at': TimestampField,
        'parsing_completed_at': TimestampField,
        'cleaning_completed_at': TimestampField,
        'splitting_completed_at': TimestampField,
        'completed_at': TimestampField,
        'paused_at': TimestampField,
        'error': fields.String,
        'stopped_at': TimestampField,
        'completed_segments': fields.Integer,
        'total_segments': fields.Integer,
    }

    document_status_fields_list = {
        'data': fields.List(fields.Nested(document_status_fields))
    }

    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, batch):
        dataset_id = str(dataset_id)
        batch = str(batch)
        documents = self.get_batch_documents(dataset_id, batch)
        documents_status = []
        for document in documents:
            completed_segments = DocumentSegment.query.filter(DocumentSegment.completed_at.isnot(None),
                                                              DocumentSegment.document_id == str(document.id),
                                                              DocumentSegment.status != 're_segment').count()
            total_segments = DocumentSegment.query.filter(DocumentSegment.document_id == str(document.id),
                                                          DocumentSegment.status != 're_segment').count()
            document.completed_segments = completed_segments
            document.total_segments = total_segments
            if document.is_paused:
                document.indexing_status = 'paused'
            documents_status.append(marshal(document, self.document_status_fields))
        data = {
            'data': documents_status
        }
        return data
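

# Indexing progress for a single document.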
class DocumentIndexingStatusApi(DocumentResource):
    document_status_fields = {
        'id': fields.String,
        'indexing_status': fields.String,
        'processing_started_at': TimestampField,
        'parsing_completed_at': TimestampField,
        'cleaning_completed_at': TimestampField,
        'splitting_completed_at': TimestampField,
        'completed_at': TimestampField,
        'paused_at': TimestampField,
        'error': fields.String,
        'stopped_at': TimestampField,
        'completed_segments': fields.Integer,
        'total_segments': fields.Integer,
    }

    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        completed_segments = DocumentSegment.query \
            .filter(DocumentSegment.completed_at.isnot(None),
                    DocumentSegment.document_id == str(document_id),
                    DocumentSegment.status != 're_segment') \
            .count()
        total_segments = DocumentSegment.query \
            .filter(DocumentSegment.document_id == str(document_id),
                    DocumentSegment.status != 're_segment') \
            .count()

        document.completed_segments = completed_segments
        document.total_segments = total_segments
        if document.is_paused:
            document.indexing_status = 'paused'
        return marshal(document, self.document_status_fields)
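

# Document detail; the `metadata` query arg ('all' | 'only' | 'without')
# controls how much metadata is included in the response.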
class DocumentDetailApi(DocumentResource):
    METADATA_CHOICES = {'all', 'only', 'without'}

    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        metadata = request.args.get('metadata', 'all')
        if metadata not in self.METADATA_CHOICES:
            raise InvalidMetadataError(f'Invalid metadata value: {metadata}')

        if metadata == 'only':
            response = {
                'id': document.id,
                'doc_type': document.doc_type,
                'doc_metadata': document.doc_metadata
            }
        elif metadata == 'without':
            process_rules = DatasetService.get_process_rules(dataset_id)
            data_source_info = document.data_source_detail_dict
            response = {
                'id': document.id,
                'position': document.position,
                'data_source_type': document.data_source_type,
                'data_source_info': data_source_info,
                'dataset_process_rule_id': document.dataset_process_rule_id,
                'dataset_process_rule': process_rules,
                'name': document.name,
                'created_from': document.created_from,
                'created_by': document.created_by,
                'created_at': document.created_at.timestamp(),
                'tokens': document.tokens,
                'indexing_status': document.indexing_status,
                'completed_at': int(document.completed_at.timestamp()) if document.completed_at else None,
                'updated_at': int(document.updated_at.timestamp()) if document.updated_at else None,
                'indexing_latency': document.indexing_latency,
                'error': document.error,
                'enabled': document.enabled,
                'disabled_at': int(document.disabled_at.timestamp()) if document.disabled_at else None,
                'disabled_by': document.disabled_by,
                'archived': document.archived,
                'segment_count': document.segment_count,
                'average_segment_length': document.average_segment_length,
                'hit_count': document.hit_count,
                'display_status': document.display_status,
                'doc_form': document.doc_form
            }
        else:
            process_rules = DatasetService.get_process_rules(dataset_id)
            data_source_info = document.data_source_detail_dict_()
            response = {
                'id': document.id,
                'position': document.position,
                'data_source_type': document.data_source_type,
                'data_source_info': data_source_info,
                'dataset_process_rule_id': document.dataset_process_rule_id,
                'dataset_process_rule': process_rules,
                'name': document.name,
                'created_from': document.created_from,
                'created_by': document.created_by,
                'created_at': document.created_at.timestamp(),
                'tokens': document.tokens,
                'indexing_status': document.indexing_status,
                'completed_at': int(document.completed_at.timestamp()) if document.completed_at else None,
                'updated_at': int(document.updated_at.timestamp()) if document.updated_at else None,
                'indexing_latency': document.indexing_latency,
                'error': document.error,
                'enabled': document.enabled,
                'disabled_at': int(document.disabled_at.timestamp()) if document.disabled_at else None,
                'disabled_by': document.disabled_by,
                'archived': document.archived,
                'doc_type': document.doc_type,
                'doc_metadata': document.doc_metadata,
                'segment_count': document.segment_count,
                'average_segment_length': document.average_segment_length,
                'hit_count': document.hit_count,
                'display_status': document.display_status,
                'doc_form': document.doc_form
            }

        return response, 200
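

# Pause or resume a document while it is in the 'indexing' state.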
class DocumentProcessingApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def patch(self, dataset_id, document_id, action):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        # The role of the current user in the ta table must be admin or owner
        if current_user.current_tenant.current_role not in ['admin', 'owner']:
            raise Forbidden()

        if action == "pause":
            if document.indexing_status != "indexing":
                raise InvalidActionError('Document not in indexing state.')

            document.paused_by = current_user.id
            document.paused_at = datetime.utcnow()
            document.is_paused = True
            db.session.commit()

        elif action == "resume":
            if document.indexing_status not in ["paused", "error"]:
                raise InvalidActionError('Document not in paused or error state.')

            document.paused_by = None
            document.paused_at = None
            document.is_paused = False
            db.session.commit()
        else:
            raise InvalidActionError()

        return {'result': 'success'}, 200
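

# Delete a document; rejected while the document is still being indexed.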
class DocumentDeleteApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def delete(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        try:
            DocumentService.delete_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError('Cannot delete document during indexing.')

        return {'result': 'success'}, 204
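

# Replace a document's doc_type/doc_metadata, keeping only keys that match
# DocumentService.DOCUMENT_METADATA_SCHEMA for that type.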
class DocumentMetadataApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def put(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        req_data = request.get_json()

        doc_type = req_data.get('doc_type')
        doc_metadata = req_data.get('doc_metadata')

        # The role of the current user in the ta table must be admin or owner
        if current_user.current_tenant.current_role not in ['admin', 'owner']:
            raise Forbidden()

        if doc_type is None or doc_metadata is None:
            raise ValueError('Both doc_type and doc_metadata must be provided.')

        if doc_type not in DocumentService.DOCUMENT_METADATA_SCHEMA:
            raise ValueError('Invalid doc_type.')

        if not isinstance(doc_metadata, dict):
            raise ValueError('doc_metadata must be a dictionary.')

        metadata_schema = DocumentService.DOCUMENT_METADATA_SCHEMA[doc_type]

        document.doc_metadata = {}

        for key, value_type in metadata_schema.items():
            value = doc_metadata.get(key)
            if value is not None and isinstance(value, value_type):
                document.doc_metadata[key] = value

        document.doc_type = doc_type
        document.updated_at = datetime.utcnow()
        db.session.commit()

        return {'result': 'success', 'message': 'Document metadata updated.'}, 200
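

# Enable/disable/archive/un-archive a document. Index membership is updated
# asynchronously via Celery tasks, guarded by a 10-minute Redis lock.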
class DocumentStatusApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def patch(self, dataset_id, document_id, action):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        # The role of the current user in the ta table must be admin or owner
        if current_user.current_tenant.current_role not in ['admin', 'owner']:
            raise Forbidden()

        indexing_cache_key = 'document_{}_indexing'.format(document.id)
        cache_result = redis_client.get(indexing_cache_key)
        if cache_result is not None:
            raise InvalidActionError("Document is being indexed, please try again later")

        if action == "enable":
            if document.enabled:
                raise InvalidActionError('Document already enabled.')

            document.enabled = True
            document.disabled_at = None
            document.disabled_by = None
            document.updated_at = datetime.utcnow()
            db.session.commit()

            # Set cache to prevent indexing the same document multiple times
            redis_client.setex(indexing_cache_key, 600, 1)

            add_document_to_index_task.delay(document_id)

            return {'result': 'success'}, 200

        elif action == "disable":
            if not document.completed_at or document.indexing_status != 'completed':
                raise InvalidActionError('Document is not completed.')
            if not document.enabled:
                raise InvalidActionError('Document already disabled.')

            document.enabled = False
            document.disabled_at = datetime.utcnow()
            document.disabled_by = current_user.id
            document.updated_at = datetime.utcnow()
            db.session.commit()

            # Set cache to prevent indexing the same document multiple times
            redis_client.setex(indexing_cache_key, 600, 1)

            remove_document_from_index_task.delay(document_id)

            return {'result': 'success'}, 200

        elif action == "archive":
            if document.archived:
                raise InvalidActionError('Document already archived.')

            document.archived = True
            document.archived_at = datetime.utcnow()
            document.archived_by = current_user.id
            document.updated_at = datetime.utcnow()
            db.session.commit()

            if document.enabled:
                # Set cache to prevent indexing the same document multiple times
                redis_client.setex(indexing_cache_key, 600, 1)

                remove_document_from_index_task.delay(document_id)

            return {'result': 'success'}, 200
        elif action == "un_archive":
            if not document.archived:
                raise InvalidActionError('Document is not archived.')

            document.archived = False
            document.archived_at = None
            document.archived_by = None
            document.updated_at = datetime.utcnow()
            db.session.commit()

            # Set cache to prevent indexing the same document multiple times
            redis_client.setex(indexing_cache_key, 600, 1)

            add_document_to_index_task.delay(document_id)

            return {'result': 'success'}, 200
        else:
            raise InvalidActionError()
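

# Pause indexing of a document; archived documents are immutable.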
class DocumentPauseApi(DocumentResource):
    def patch(self, dataset_id, document_id):
        """pause document."""
        dataset_id = str(dataset_id)
        document_id = str(document_id)

        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        document = DocumentService.get_document(dataset.id, document_id)

        # 404 if document not found
        if document is None:
            raise NotFound('Document not found.')

        # 403 if document is archived
        if DocumentService.check_archived(document):
            raise ArchivedDocumentImmutableError()

        try:
            # pause document
            DocumentService.pause_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError('Cannot pause completed document.')

        return {'result': 'success'}, 204
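

# Resume (recover) indexing of a previously paused document.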
class DocumentRecoverApi(DocumentResource):
    def patch(self, dataset_id, document_id):
        """recover document."""
        dataset_id = str(dataset_id)
        document_id = str(document_id)

        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        document = DocumentService.get_document(dataset.id, document_id)

        # 404 if document not found
        if document is None:
            raise NotFound('Document not found.')

        # 403 if document is archived
        if DocumentService.check_archived(document):
            raise ArchivedDocumentImmutableError()

        try:
            # recover (resume) the paused document
            DocumentService.recover_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError('Document is not in paused status.')

        return {'result': 'success'}, 204
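

# Route registration for the console dataset-document API.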
api.add_resource(GetProcessRuleApi, '/datasets/process-rule')
api.add_resource(DatasetDocumentListApi,
                 '/datasets/<uuid:dataset_id>/documents')
api.add_resource(DatasetInitApi,
                 '/datasets/init')
api.add_resource(DocumentIndexingEstimateApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/indexing-estimate')
api.add_resource(DocumentBatchIndexingEstimateApi,
                 '/datasets/<uuid:dataset_id>/batch/<string:batch>/indexing-estimate')
api.add_resource(DocumentBatchIndexingStatusApi,
                 '/datasets/<uuid:dataset_id>/batch/<string:batch>/indexing-status')
api.add_resource(DocumentIndexingStatusApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/indexing-status')
api.add_resource(DocumentDetailApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>')
api.add_resource(DocumentProcessingApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/<string:action>')
api.add_resource(DocumentDeleteApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>')
api.add_resource(DocumentMetadataApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/metadata')
api.add_resource(DocumentStatusApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/status/<string:action>')
api.add_resource(DocumentPauseApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/pause')
api.add_resource(DocumentRecoverApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/resume')