clean_unused_datasets_task.py

import datetime
import time

import click
from sqlalchemy import func
from werkzeug.exceptions import NotFound

import app
from configs import dify_config
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import Dataset, DatasetQuery, Document
from services.feature_service import FeatureService


@app.celery.task(queue="dataset")
def clean_unused_datasets_task():
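    """Disable the indexes and documents of datasets that look unused.

    A dataset qualifies when it was created before the retention cutoff, has
    completed documents but none updated after the cutoff, and has received no
    queries after the cutoff. The first pass applies the sandbox-plan window to
    every dataset; the second applies the pro-plan window and only cleans
    tenants on the "sandbox" plan.
    """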
    click.echo(click.style("Start cleaning unused dataset indexes.", fg="green"))
    plan_sandbox_clean_day_setting = dify_config.PLAN_SANDBOX_CLEAN_DAY_SETTING
    plan_pro_clean_day_setting = dify_config.PLAN_PRO_CLEAN_DAY_SETTING
    start_at = time.perf_counter()
    plan_sandbox_clean_day = datetime.datetime.now() - datetime.timedelta(days=plan_sandbox_clean_day_setting)
    plan_pro_clean_day = datetime.datetime.now() - datetime.timedelta(days=plan_pro_clean_day_setting)
    page = 1
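    # First pass: find candidates against the sandbox-plan retention window,
    # paginated 50 datasets at a time.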
    while True:
        try:
            # Subquery for counting new documents
            document_subquery_new = (
                db.session.query(Document.dataset_id, func.count(Document.id).label("document_count"))
                .filter(
                    Document.indexing_status == "completed",
                    Document.enabled == True,
                    Document.archived == False,
                    Document.updated_at > plan_sandbox_clean_day,
                )
                .group_by(Document.dataset_id)
                .subquery()
            )
            # Subquery for counting old documents
            document_subquery_old = (
                db.session.query(Document.dataset_id, func.count(Document.id).label("document_count"))
                .filter(
                    Document.indexing_status == "completed",
                    Document.enabled == True,
                    Document.archived == False,
                    Document.updated_at < plan_sandbox_clean_day,
                )
                .group_by(Document.dataset_id)
                .subquery()
            )
            # Main query with join and filter
            datasets = (
                db.session.query(Dataset)
                .outerjoin(document_subquery_new, Dataset.id == document_subquery_new.c.dataset_id)
                .outerjoin(document_subquery_old, Dataset.id == document_subquery_old.c.dataset_id)
                .filter(
                    Dataset.created_at < plan_sandbox_clean_day,
                    func.coalesce(document_subquery_new.c.document_count, 0) == 0,
                    func.coalesce(document_subquery_old.c.document_count, 0) > 0,
                )
                .order_by(Dataset.created_at.desc())
                .paginate(page=page, per_page=50)
            )
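        # Flask-SQLAlchemy's paginate() raises NotFound (HTTP 404) when the
        # requested page runs past the last one; that ends this loop.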
        except NotFound:
            break
        if not datasets.items:
            break
        page += 1
        for dataset in datasets:
            dataset_query = (
                db.session.query(DatasetQuery)
                .filter(DatasetQuery.created_at > plan_sandbox_clean_day, DatasetQuery.dataset_id == dataset.id)
                .all()
            )
            if not dataset_query:
                try:
                    # Remove the vector index for this dataset's document form
                    index_processor = IndexProcessorFactory(dataset.doc_form).init_index_processor()
                    index_processor.clean(dataset, None)
                    # Disable all of the dataset's documents
                    update_params = {Document.enabled: False}
                    Document.query.filter_by(dataset_id=dataset.id).update(update_params)
                    db.session.commit()
                    click.echo(click.style("Cleaned unused dataset {} from db successfully!".format(dataset.id), fg="green"))
                except Exception as e:
                    click.echo(
                        click.style("clean dataset index error: {} {}".format(e.__class__.__name__, str(e)), fg="red")
                    )
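    # Second pass: the same candidate query against the pro-plan retention
    # window, but only datasets of tenants on the "sandbox" plan are cleaned.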
    page = 1
    while True:
        try:
            # Subquery for counting new documents
            document_subquery_new = (
                db.session.query(Document.dataset_id, func.count(Document.id).label("document_count"))
                .filter(
                    Document.indexing_status == "completed",
                    Document.enabled == True,
                    Document.archived == False,
                    Document.updated_at > plan_pro_clean_day,
                )
                .group_by(Document.dataset_id)
                .subquery()
            )
            # Subquery for counting old documents
            document_subquery_old = (
                db.session.query(Document.dataset_id, func.count(Document.id).label("document_count"))
                .filter(
                    Document.indexing_status == "completed",
                    Document.enabled == True,
                    Document.archived == False,
                    Document.updated_at < plan_pro_clean_day,
                )
                .group_by(Document.dataset_id)
                .subquery()
            )
            # Main query with join and filter
            datasets = (
                db.session.query(Dataset)
                .outerjoin(document_subquery_new, Dataset.id == document_subquery_new.c.dataset_id)
                .outerjoin(document_subquery_old, Dataset.id == document_subquery_old.c.dataset_id)
                .filter(
                    Dataset.created_at < plan_pro_clean_day,
                    func.coalesce(document_subquery_new.c.document_count, 0) == 0,
                    func.coalesce(document_subquery_old.c.document_count, 0) > 0,
                )
                .order_by(Dataset.created_at.desc())
                .paginate(page=page, per_page=50)
            )
        except NotFound:
            break
        if not datasets.items:
            break
        page += 1
        for dataset in datasets:
            dataset_query = (
                db.session.query(DatasetQuery)
                .filter(DatasetQuery.created_at > plan_pro_clean_day, DatasetQuery.dataset_id == dataset.id)
                .all()
            )
            if not dataset_query:
                try:
                    # Look up the tenant's plan, cached in Redis for 10 minutes.
                    # NOTE: if the Redis client is not configured to decode
                    # responses, get() returns bytes and the string comparison
                    # below assumes decoded responses.
                    features_cache_key = f"features:{dataset.tenant_id}"
                    plan = redis_client.get(features_cache_key)
                    if plan is None:
                        features = FeatureService.get_features(dataset.tenant_id)
                        redis_client.setex(features_cache_key, 600, features.billing.subscription.plan)
                        plan = features.billing.subscription.plan
                    if plan == "sandbox":
                        # Remove the vector index for this dataset's document form
                        index_processor = IndexProcessorFactory(dataset.doc_form).init_index_processor()
                        index_processor.clean(dataset, None)
                        # Disable all of the dataset's documents
                        update_params = {Document.enabled: False}
                        Document.query.filter_by(dataset_id=dataset.id).update(update_params)
                        db.session.commit()
                        click.echo(
                            click.style("Cleaned unused dataset {} from db successfully!".format(dataset.id), fg="green")
                        )
                except Exception as e:
                    click.echo(
                        click.style("clean dataset index error: {} {}".format(e.__class__.__name__, str(e)), fg="red")
                    )
    end_at = time.perf_counter()
    click.echo(click.style("Cleaned unused datasets from db successfully, latency: {}".format(end_at - start_at), fg="green"))
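
# How this task gets triggered is not shown in this file. A minimal sketch,
# assuming a Celery beat entry (the module path and daily cadence below are
# illustrative assumptions, not taken from this file):
#
#     from datetime import timedelta
#
#     app.celery.conf.beat_schedule = {
#         "clean_unused_datasets_task": {
#             "task": "schedule.clean_unused_datasets_task.clean_unused_datasets_task",
#             "schedule": timedelta(days=1),
#         },
#     }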