Revert "Feat/support parent child chunk (#12092)"
This reverts commit 9231fdb.

Signed-off-by: -LAN- <laipz8200@outlook.com>
laipz8200 committed Jan 2, 2025
1 parent c3d7fe7 commit 059f660
Showing 54 changed files with 806 additions and 2,576 deletions.
2 changes: 1 addition & 1 deletion api/controllers/console/datasets/data_source.py
@@ -218,7 +218,7 @@ def post(self):
args["doc_form"],
args["doc_language"],
)
return response.model_dump(), 200
return response, 200


class DataSourceNotionDatasetSyncApi(Resource):
15 changes: 1 addition & 14 deletions api/controllers/console/datasets/datasets.py
@@ -464,7 +464,7 @@ def post(self):
except Exception as e:
raise IndexingEstimateError(str(e))

return response.model_dump(), 200
return response, 200


class DatasetRelatedAppListApi(Resource):
@@ -733,18 +733,6 @@ def get(self, dataset_id):
}, 200


class DatasetAutoDisableLogApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, dataset_id):
dataset_id_str = str(dataset_id)
dataset = DatasetService.get_dataset(dataset_id_str)
if dataset is None:
raise NotFound("Dataset not found.")
return DatasetService.get_dataset_auto_disable_logs(dataset_id_str), 200


api.add_resource(DatasetListApi, "/datasets")
api.add_resource(DatasetApi, "/datasets/<uuid:dataset_id>")
api.add_resource(DatasetUseCheckApi, "/datasets/<uuid:dataset_id>/use-check")
@@ -759,4 +747,3 @@ def get(self, dataset_id):
api.add_resource(DatasetRetrievalSettingApi, "/datasets/retrieval-setting")
api.add_resource(DatasetRetrievalSettingMockApi, "/datasets/retrieval-setting/<string:vector_type>")
api.add_resource(DatasetPermissionUserListApi, "/datasets/<uuid:dataset_id>/permission-part-users")
api.add_resource(DatasetAutoDisableLogApi, "/datasets/<uuid:dataset_id>/auto-disable-logs")
178 changes: 79 additions & 99 deletions api/controllers/console/datasets/datasets_document.py
@@ -52,7 +52,6 @@
from libs.login import login_required
from models import Dataset, DatasetProcessRule, Document, DocumentSegment, UploadFile
from services.dataset_service import DatasetService, DocumentService
from services.entities.knowledge_entities.knowledge_entities import KnowledgeConfig
from tasks.add_document_to_index_task import add_document_to_index_task
from tasks.remove_document_from_index_task import remove_document_from_index_task

@@ -256,22 +255,20 @@ def post(self, dataset_id):
parser.add_argument("duplicate", type=bool, default=True, nullable=False, location="json")
parser.add_argument("original_document_id", type=str, required=False, location="json")
parser.add_argument("doc_form", type=str, default="text_model", required=False, nullable=False, location="json")
parser.add_argument("retrieval_model", type=dict, required=False, nullable=False, location="json")

parser.add_argument(
"doc_language", type=str, default="English", required=False, nullable=False, location="json"
)
parser.add_argument("retrieval_model", type=dict, required=False, nullable=False, location="json")
args = parser.parse_args()
knowledge_config = KnowledgeConfig(**args)

if not dataset.indexing_technique and not knowledge_config.indexing_technique:
if not dataset.indexing_technique and not args["indexing_technique"]:
raise ValueError("indexing_technique is required.")

# validate args
DocumentService.document_create_args_validate(knowledge_config)
DocumentService.document_create_args_validate(args)

try:
documents, batch = DocumentService.save_document_with_dataset_id(dataset, knowledge_config, current_user)
documents, batch = DocumentService.save_document_with_dataset_id(dataset, args, current_user)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
@@ -281,25 +278,6 @@ def post(self, dataset_id):

return {"documents": documents, "batch": batch}

@setup_required
@login_required
@account_initialization_required
def delete(self, dataset_id):
dataset_id = str(dataset_id)
dataset = DatasetService.get_dataset(dataset_id)
if dataset is None:
raise NotFound("Dataset not found.")
# check user's model setting
DatasetService.check_dataset_model_setting(dataset)

try:
document_ids = request.args.getlist("document_id")
DocumentService.delete_documents(dataset, document_ids)
except services.errors.document.DocumentIndexingError:
raise DocumentIndexingError("Cannot delete document during indexing.")

return {"result": "success"}, 204


class DatasetInitApi(Resource):
@setup_required
@@ -335,9 +313,9 @@ def post(self):
# The role of the current user in the ta table must be admin, owner, or editor, or dataset_operator
if not current_user.is_dataset_editor:
raise Forbidden()
knowledge_config = KnowledgeConfig(**args)
if knowledge_config.indexing_technique == "high_quality":
if knowledge_config.embedding_model is None or knowledge_config.embedding_model_provider is None:

if args["indexing_technique"] == "high_quality":
if args["embedding_model"] is None or args["embedding_model_provider"] is None:
raise ValueError("embedding model and embedding model provider are required for high quality indexing.")
try:
model_manager = ModelManager()
@@ -356,11 +334,11 @@ def post(self):
raise ProviderNotInitializeError(ex.description)

# validate args
DocumentService.document_create_args_validate(knowledge_config)
DocumentService.document_create_args_validate(args)

try:
dataset, documents, batch = DocumentService.save_document_without_dataset_id(
tenant_id=current_user.current_tenant_id, knowledge_config=knowledge_config, account=current_user
tenant_id=current_user.current_tenant_id, document_data=args, account=current_user
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@@ -431,7 +409,7 @@ def get(self, dataset_id, document_id):
except Exception as e:
raise IndexingEstimateError(str(e))

return response.model_dump(), 200
return response


class DocumentBatchIndexingEstimateApi(DocumentResource):
@@ -443,7 +421,7 @@ def get(self, dataset_id, batch):
batch = str(batch)
documents = self.get_batch_documents(dataset_id, batch)
if not documents:
return {"tokens": 0, "total_price": 0, "currency": "USD", "total_segments": 0, "preview": []}, 200
return {"tokens": 0, "total_price": 0, "currency": "USD", "total_segments": 0, "preview": []}
data_process_rule = documents[0].dataset_process_rule
data_process_rule_dict = data_process_rule.to_dict()
info_list = []
@@ -530,7 +508,7 @@ def get(self, dataset_id, batch):
raise ProviderNotInitializeError(ex.description)
except Exception as e:
raise IndexingEstimateError(str(e))
return response.model_dump(), 200
return response


class DocumentBatchIndexingStatusApi(DocumentResource):
@@ -603,17 +581,15 @@ def get(self, dataset_id, document_id):
if metadata == "only":
response = {"id": document.id, "doc_type": document.doc_type, "doc_metadata": document.doc_metadata}
elif metadata == "without":
dataset_process_rules = DatasetService.get_process_rules(dataset_id)
document_process_rules = document.dataset_process_rule.to_dict()
process_rules = DatasetService.get_process_rules(dataset_id)
data_source_info = document.data_source_detail_dict
response = {
"id": document.id,
"position": document.position,
"data_source_type": document.data_source_type,
"data_source_info": data_source_info,
"dataset_process_rule_id": document.dataset_process_rule_id,
"dataset_process_rule": dataset_process_rules,
"document_process_rule": document_process_rules,
"dataset_process_rule": process_rules,
"name": document.name,
"created_from": document.created_from,
"created_by": document.created_by,
@@ -636,17 +612,15 @@ def get(self, dataset_id, document_id):
"doc_language": document.doc_language,
}
else:
dataset_process_rules = DatasetService.get_process_rules(dataset_id)
document_process_rules = document.dataset_process_rule.to_dict()
process_rules = DatasetService.get_process_rules(dataset_id)
data_source_info = document.data_source_detail_dict
response = {
"id": document.id,
"position": document.position,
"data_source_type": document.data_source_type,
"data_source_info": data_source_info,
"dataset_process_rule_id": document.dataset_process_rule_id,
"dataset_process_rule": dataset_process_rules,
"document_process_rule": document_process_rules,
"dataset_process_rule": process_rules,
"name": document.name,
"created_from": document.created_from,
"created_by": document.created_by,
@@ -782,8 +756,9 @@ class DocumentStatusApi(DocumentResource):
@login_required
@account_initialization_required
@cloud_edition_billing_resource_check("vector_space")
def patch(self, dataset_id, action):
def patch(self, dataset_id, document_id, action):
dataset_id = str(dataset_id)
document_id = str(document_id)
dataset = DatasetService.get_dataset(dataset_id)
if dataset is None:
raise NotFound("Dataset not found.")
@@ -798,79 +773,84 @@ def patch(self, dataset_id, action):
# check user's permission
DatasetService.check_dataset_permission(dataset, current_user)

document_ids = request.args.getlist("document_id")
for document_id in document_ids:
document = self.get_document(dataset_id, document_id)
document = self.get_document(dataset_id, document_id)

indexing_cache_key = "document_{}_indexing".format(document.id)
cache_result = redis_client.get(indexing_cache_key)
if cache_result is not None:
raise InvalidActionError(f"Document:{document.name} is being indexed, please try again later")
indexing_cache_key = "document_{}_indexing".format(document.id)
cache_result = redis_client.get(indexing_cache_key)
if cache_result is not None:
raise InvalidActionError("Document is being indexed, please try again later")

if action == "enable":
if document.enabled:
continue
document.enabled = True
document.disabled_at = None
document.disabled_by = None
document.updated_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()
if action == "enable":
if document.enabled:
raise InvalidActionError("Document already enabled.")

# Set cache to prevent indexing the same document multiple times
redis_client.setex(indexing_cache_key, 600, 1)
document.enabled = True
document.disabled_at = None
document.disabled_by = None
document.updated_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()

add_document_to_index_task.delay(document_id)
# Set cache to prevent indexing the same document multiple times
redis_client.setex(indexing_cache_key, 600, 1)

elif action == "disable":
if not document.completed_at or document.indexing_status != "completed":
raise InvalidActionError(f"Document: {document.name} is not completed.")
if not document.enabled:
continue
add_document_to_index_task.delay(document_id)

document.enabled = False
document.disabled_at = datetime.now(UTC).replace(tzinfo=None)
document.disabled_by = current_user.id
document.updated_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()
return {"result": "success"}, 200

# Set cache to prevent indexing the same document multiple times
redis_client.setex(indexing_cache_key, 600, 1)
elif action == "disable":
if not document.completed_at or document.indexing_status != "completed":
raise InvalidActionError("Document is not completed.")
if not document.enabled:
raise InvalidActionError("Document already disabled.")

remove_document_from_index_task.delay(document_id)
document.enabled = False
document.disabled_at = datetime.now(UTC).replace(tzinfo=None)
document.disabled_by = current_user.id
document.updated_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()

elif action == "archive":
if document.archived:
continue
# Set cache to prevent indexing the same document multiple times
redis_client.setex(indexing_cache_key, 600, 1)

document.archived = True
document.archived_at = datetime.now(UTC).replace(tzinfo=None)
document.archived_by = current_user.id
document.updated_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()
remove_document_from_index_task.delay(document_id)

if document.enabled:
# Set cache to prevent indexing the same document multiple times
redis_client.setex(indexing_cache_key, 600, 1)
return {"result": "success"}, 200

remove_document_from_index_task.delay(document_id)
elif action == "archive":
if document.archived:
raise InvalidActionError("Document already archived.")

elif action == "un_archive":
if not document.archived:
continue
document.archived = False
document.archived_at = None
document.archived_by = None
document.updated_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()
document.archived = True
document.archived_at = datetime.now(UTC).replace(tzinfo=None)
document.archived_by = current_user.id
document.updated_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()

if document.enabled:
# Set cache to prevent indexing the same document multiple times
redis_client.setex(indexing_cache_key, 600, 1)

add_document_to_index_task.delay(document_id)
remove_document_from_index_task.delay(document_id)

else:
raise InvalidActionError()
return {"result": "success"}, 200
return {"result": "success"}, 200
elif action == "un_archive":
if not document.archived:
raise InvalidActionError("Document is not archived.")

document.archived = False
document.archived_at = None
document.archived_by = None
document.updated_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()

# Set cache to prevent indexing the same document multiple times
redis_client.setex(indexing_cache_key, 600, 1)

add_document_to_index_task.delay(document_id)

return {"result": "success"}, 200
else:
raise InvalidActionError()


class DocumentPauseApi(DocumentResource):
@@ -1041,7 +1021,7 @@ def get(self, dataset_id, document_id):
)
api.add_resource(DocumentDeleteApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>")
api.add_resource(DocumentMetadataApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/metadata")
api.add_resource(DocumentStatusApi, "/datasets/<uuid:dataset_id>/documents/status/<string:action>/batch")
api.add_resource(DocumentStatusApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/status/<string:action>")
api.add_resource(DocumentPauseApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/pause")
api.add_resource(DocumentRecoverApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/resume")
api.add_resource(DocumentRetryApi, "/datasets/<uuid:dataset_id>/retry")
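
For context, a minimal sketch of how a console client could call the per-document status route restored above for DocumentStatusApi. The base URL, the IDs, and the session cookie below are placeholders for illustration only; they are not part of this diff, and the console API's real authentication comes from the logged-in session.

import requests

# Placeholder values -- not taken from this diff.
BASE_URL = "http://localhost:5001/console/api"
DATASET_ID = "00000000-0000-0000-0000-000000000000"
DOCUMENT_ID = "11111111-1111-1111-1111-111111111111"

# Restored route: one document per request, with the action in the path
# (enable | disable | archive | un_archive).
resp = requests.patch(
    f"{BASE_URL}/datasets/{DATASET_ID}/documents/{DOCUMENT_ID}/status/disable",
    cookies={"session": "<console-session-cookie>"},  # placeholder auth
)
print(resp.status_code, resp.json())  # expect 200 and {"result": "success"}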
(Diffs for the remaining 51 changed files are not shown.)
