node.py

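"""LLM workflow node.

Resolves the node's prompt variables, files, context, model configuration and
conversation memory, invokes the selected model, streams the response as node
events, and records token usage and quota consumption.
"""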
import json
from collections.abc import Generator, Mapping, Sequence
from typing import TYPE_CHECKING, Any, Optional, cast

from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.entities.model_entities import ModelStatus
from core.entities.provider_entities import QuotaUnit
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance, ModelManager
from core.model_runtime.entities import (
    AudioPromptMessageContent,
    ImagePromptMessageContent,
    PromptMessage,
    PromptMessageContentType,
    TextPromptMessageContent,
)
from core.model_runtime.entities.llm_entities import LLMResult, LLMUsage
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.utils.encoders import jsonable_encoder
from core.prompt.advanced_prompt_transform import AdvancedPromptTransform
from core.prompt.entities.advanced_prompt_entities import CompletionModelPromptTemplate, MemoryConfig
from core.prompt.utils.prompt_message_util import PromptMessageUtil
from core.variables import ArrayAnySegment, ArrayFileSegment, FileSegment, NoneSegment
from core.workflow.constants import SYSTEM_VARIABLE_NODE_ID
from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.event import InNodeEvent
from core.workflow.nodes.base import BaseNode
from core.workflow.nodes.enums import NodeType
from core.workflow.nodes.event import (
    ModelInvokeCompletedEvent,
    NodeEvent,
    RunCompletedEvent,
    RunRetrieverResourceEvent,
    RunStreamChunkEvent,
)
from core.workflow.utils.variable_template_parser import VariableTemplateParser
from extensions.ext_database import db
from models.model import Conversation
from models.provider import Provider, ProviderType
from models.workflow import WorkflowNodeExecutionStatus

from .entities import (
    LLMNodeChatModelMessage,
    LLMNodeCompletionModelPromptTemplate,
    LLMNodeData,
    ModelConfig,
)

if TYPE_CHECKING:
    from core.file.models import File


class LLMNode(BaseNode[LLMNodeData]):
    _node_data_cls = LLMNodeData
    _node_type = NodeType.LLM

    def _run(self) -> NodeRunResult | Generator[NodeEvent | InNodeEvent, None, None]:
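        """Run the node: resolve inputs, files, context, model config and memory,
        build the prompt messages, invoke the model, and yield stream/completion events."""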
        node_inputs = None
        process_data = None

        try:
            # init messages template
            self.node_data.prompt_template = self._transform_chat_messages(self.node_data.prompt_template)

            # fetch variables and fetch values from variable pool
            inputs = self._fetch_inputs(node_data=self.node_data)

            # fetch jinja2 inputs
            jinja_inputs = self._fetch_jinja_inputs(node_data=self.node_data)

            # merge inputs
            inputs.update(jinja_inputs)

            node_inputs = {}

            # fetch files
            files = (
                self._fetch_files(selector=self.node_data.vision.configs.variable_selector)
                if self.node_data.vision.enabled
                else []
            )

            if files:
                node_inputs["#files#"] = [file.to_dict() for file in files]

            # fetch context value
            generator = self._fetch_context(node_data=self.node_data)
            context = None
            for event in generator:
                if isinstance(event, RunRetrieverResourceEvent):
                    context = event.context
                    yield event

            if context:
                node_inputs["#context#"] = context  # type: ignore

            # fetch model config
            model_instance, model_config = self._fetch_model_config(self.node_data.model)

            # fetch memory
            memory = self._fetch_memory(node_data_memory=self.node_data.memory, model_instance=model_instance)

            # fetch prompt messages
            if self.node_data.memory:
                query = self.graph_runtime_state.variable_pool.get((SYSTEM_VARIABLE_NODE_ID, SystemVariableKey.QUERY))
                if not query:
                    raise ValueError("Query not found")
                query = query.text
            else:
                query = None

            prompt_messages, stop = self._fetch_prompt_messages(
                system_query=query,
                inputs=inputs,
                files=files,
                context=context,
                memory=memory,
                model_config=model_config,
                vision_detail=self.node_data.vision.configs.detail,
                prompt_template=self.node_data.prompt_template,
                memory_config=self.node_data.memory,
            )

            process_data = {
                "model_mode": model_config.mode,
                "prompts": PromptMessageUtil.prompt_messages_to_prompt_for_saving(
                    model_mode=model_config.mode, prompt_messages=prompt_messages
                ),
                "model_provider": model_config.provider,
                "model_name": model_config.model,
            }

            # handle invoke result
            generator = self._invoke_llm(
                node_data_model=self.node_data.model,
                model_instance=model_instance,
                prompt_messages=prompt_messages,
                stop=stop,
            )

            result_text = ""
            usage = LLMUsage.empty_usage()
            finish_reason = None
            for event in generator:
                if isinstance(event, RunStreamChunkEvent):
                    yield event
                elif isinstance(event, ModelInvokeCompletedEvent):
                    result_text = event.text
                    usage = event.usage
                    finish_reason = event.finish_reason
                    break
        except Exception as e:
            yield RunCompletedEvent(
                run_result=NodeRunResult(
                    status=WorkflowNodeExecutionStatus.FAILED,
                    error=str(e),
                    inputs=node_inputs,
                    process_data=process_data,
                )
            )
            return

        outputs = {"text": result_text, "usage": jsonable_encoder(usage), "finish_reason": finish_reason}

        yield RunCompletedEvent(
            run_result=NodeRunResult(
                status=WorkflowNodeExecutionStatus.SUCCEEDED,
                inputs=node_inputs,
                process_data=process_data,
                outputs=outputs,
                metadata={
                    NodeRunMetadataKey.TOTAL_TOKENS: usage.total_tokens,
                    NodeRunMetadataKey.TOTAL_PRICE: usage.total_price,
                    NodeRunMetadataKey.CURRENCY: usage.currency,
                },
                llm_usage=usage,
            )
        )

    def _invoke_llm(
        self,
        node_data_model: ModelConfig,
        model_instance: ModelInstance,
        prompt_messages: list[PromptMessage],
        stop: Optional[list[str]] = None,
    ) -> Generator[NodeEvent, None, None]:
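        """Invoke the model with the prepared prompt messages, re-yield the resulting
        node events, and deduct the tenant's quota based on the reported usage."""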
        db.session.close()

        invoke_result = model_instance.invoke_llm(
            prompt_messages=prompt_messages,
            model_parameters=node_data_model.completion_params,
            stop=stop,
            stream=True,
            user=self.user_id,
        )

        # handle invoke result
        generator = self._handle_invoke_result(invoke_result=invoke_result)

        usage = LLMUsage.empty_usage()
        for event in generator:
            yield event
            if isinstance(event, ModelInvokeCompletedEvent):
                usage = event.usage

        # deduct quota
        self.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)

    def _handle_invoke_result(self, invoke_result: LLMResult | Generator) -> Generator[NodeEvent, None, None]:
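        """Convert the streamed model output into node events: a RunStreamChunkEvent per
        delta and a final ModelInvokeCompletedEvent with the full text, usage and finish
        reason. Non-streaming LLMResult values are returned without yielding anything."""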
        if isinstance(invoke_result, LLMResult):
            return

        model = None
        prompt_messages: list[PromptMessage] = []
        full_text = ""
        usage = None
        finish_reason = None
        for result in invoke_result:
            text = result.delta.message.content
            full_text += text

            yield RunStreamChunkEvent(chunk_content=text, from_variable_selector=[self.node_id, "text"])

            if not model:
                model = result.model

            if not prompt_messages:
                prompt_messages = result.prompt_messages

            if not usage and result.delta.usage:
                usage = result.delta.usage

            if not finish_reason and result.delta.finish_reason:
                finish_reason = result.delta.finish_reason

        if not usage:
            usage = LLMUsage.empty_usage()

        yield ModelInvokeCompletedEvent(text=full_text, usage=usage, finish_reason=finish_reason)

    def _transform_chat_messages(
        self, messages: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate, /
    ) -> Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate:
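        """Replace each prompt's text with its jinja2_text for messages edited as jinja2."""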
        if isinstance(messages, LLMNodeCompletionModelPromptTemplate):
            if messages.edition_type == "jinja2" and messages.jinja2_text:
                messages.text = messages.jinja2_text

            return messages

        for message in messages:
            if message.edition_type == "jinja2" and message.jinja2_text:
                message.text = message.jinja2_text

        return messages

    def _fetch_jinja_inputs(self, node_data: LLMNodeData) -> dict[str, str]:
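        """Resolve the configured jinja2 variables into plain strings, flattening lists
        and dicts (context-style dicts are reduced to their "content" field)."""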
        variables = {}
        if not node_data.prompt_config:
            return variables

        for variable_selector in node_data.prompt_config.jinja2_variables or []:
            variable = variable_selector.variable
            value = self.graph_runtime_state.variable_pool.get_any(variable_selector.value_selector)

            def parse_dict(d: dict) -> str:
                """
                Parse dict into string
                """
                # check if it's a context structure
                if "metadata" in d and "_source" in d["metadata"] and "content" in d:
                    return d["content"]

                # else, parse the dict
                try:
                    return json.dumps(d, ensure_ascii=False)
                except Exception:
                    return str(d)

            if isinstance(value, str):
                value = value
            elif isinstance(value, list):
                result = ""
                for item in value:
                    if isinstance(item, dict):
                        result += parse_dict(item)
                    elif isinstance(item, str):
                        result += item
                    elif isinstance(item, int | float):
                        result += str(item)
                    else:
                        result += str(item)
                    result += "\n"
                value = result.strip()
            elif isinstance(value, dict):
                value = parse_dict(value)
            elif isinstance(value, int | float):
                value = str(value)
            else:
                value = str(value)

            variables[variable] = value

        return variables

    def _fetch_inputs(self, node_data: LLMNodeData) -> dict[str, str]:
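        """Extract the variable selectors referenced by the prompt template (and the
        memory query template, if configured) and resolve their values from the variable pool."""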
        inputs = {}
        prompt_template = node_data.prompt_template

        variable_selectors = []
        if isinstance(prompt_template, list):
            for prompt in prompt_template:
                variable_template_parser = VariableTemplateParser(template=prompt.text)
                variable_selectors.extend(variable_template_parser.extract_variable_selectors())
        elif isinstance(prompt_template, CompletionModelPromptTemplate):
            variable_template_parser = VariableTemplateParser(template=prompt_template.text)
            variable_selectors = variable_template_parser.extract_variable_selectors()

        for variable_selector in variable_selectors:
            variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
            if variable is None:
                raise ValueError(f"Variable {variable_selector.variable} not found")
            if isinstance(variable, NoneSegment):
                continue
            inputs[variable_selector.variable] = variable.to_object()

        memory = node_data.memory
        if memory and memory.query_prompt_template:
            query_variable_selectors = VariableTemplateParser(
                template=memory.query_prompt_template
            ).extract_variable_selectors()
            for variable_selector in query_variable_selectors:
                variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
                if variable is None:
                    raise ValueError(f"Variable {variable_selector.variable} not found")
                if isinstance(variable, NoneSegment):
                    continue
                inputs[variable_selector.variable] = variable.to_object()

        return inputs

    def _fetch_files(self, *, selector: Sequence[str]) -> Sequence["File"]:
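        """Fetch the files referenced by the vision variable selector; returns an empty
        sequence when the variable is missing or is an empty array."""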
        variable = self.graph_runtime_state.variable_pool.get(selector)
        if variable is None:
            return []
        if isinstance(variable, FileSegment):
            return [variable.value]
        if isinstance(variable, ArrayFileSegment):
            return variable.value
        # FIXME: Temporary fix for empty array,
        # all variables added to variable pool should be a Segment instance.
        if isinstance(variable, ArrayAnySegment) and len(variable.value) == 0:
            return []
        raise ValueError(f"Invalid variable type: {type(variable)}")

    def _fetch_context(self, node_data: LLMNodeData):
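        """Yield a RunRetrieverResourceEvent built from the configured context variable,
        concatenating string items and the "content" of retrieval result dicts."""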
        if not node_data.context.enabled:
            return

        if not node_data.context.variable_selector:
            return

        context_value = self.graph_runtime_state.variable_pool.get_any(node_data.context.variable_selector)
        if context_value:
            if isinstance(context_value, str):
                yield RunRetrieverResourceEvent(retriever_resources=[], context=context_value)
            elif isinstance(context_value, list):
                context_str = ""
                original_retriever_resource = []
                for item in context_value:
                    if isinstance(item, str):
                        context_str += item + "\n"
                    else:
                        if "content" not in item:
                            raise ValueError(f"Invalid context structure: {item}")

                        context_str += item["content"] + "\n"

                        retriever_resource = self._convert_to_original_retriever_resource(item)
                        if retriever_resource:
                            original_retriever_resource.append(retriever_resource)

                yield RunRetrieverResourceEvent(
                    retriever_resources=original_retriever_resource, context=context_str.strip()
                )

    def _convert_to_original_retriever_resource(self, context_dict: dict) -> Optional[dict]:
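        """Map a knowledge-retrieval context dict back to its original retriever
        resource metadata; returns None for non-knowledge items."""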
        if (
            "metadata" in context_dict
            and "_source" in context_dict["metadata"]
            and context_dict["metadata"]["_source"] == "knowledge"
        ):
            metadata = context_dict.get("metadata", {})

            source = {
                "position": metadata.get("position"),
                "dataset_id": metadata.get("dataset_id"),
                "dataset_name": metadata.get("dataset_name"),
                "document_id": metadata.get("document_id"),
                "document_name": metadata.get("document_name"),
                "data_source_type": metadata.get("document_data_source_type"),
                "segment_id": metadata.get("segment_id"),
                "retriever_from": metadata.get("retriever_from"),
                "score": metadata.get("score"),
                "hit_count": metadata.get("segment_hit_count"),
                "word_count": metadata.get("segment_word_count"),
                "segment_position": metadata.get("segment_position"),
                "index_node_hash": metadata.get("segment_index_node_hash"),
                "content": context_dict.get("content"),
                "page": metadata.get("page"),
            }

            return source

        return None

    def _fetch_model_config(
        self, node_data_model: ModelConfig
    ) -> tuple[ModelInstance, ModelConfigWithCredentialsEntity]:
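        """Resolve the model instance for the configured provider/model, validate its
        status (credentials, permission, quota), and build the runtime model config."""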
        model_name = node_data_model.name
        provider_name = node_data_model.provider

        model_manager = ModelManager()
        model_instance = model_manager.get_model_instance(
            tenant_id=self.tenant_id, model_type=ModelType.LLM, provider=provider_name, model=model_name
        )

        provider_model_bundle = model_instance.provider_model_bundle
        model_type_instance = model_instance.model_type_instance
        model_type_instance = cast(LargeLanguageModel, model_type_instance)

        model_credentials = model_instance.credentials

        # check model
        provider_model = provider_model_bundle.configuration.get_provider_model(
            model=model_name, model_type=ModelType.LLM
        )

        if provider_model is None:
            raise ValueError(f"Model {model_name} not exist.")

        if provider_model.status == ModelStatus.NO_CONFIGURE:
            raise ProviderTokenNotInitError(f"Model {model_name} credentials is not initialized.")
        elif provider_model.status == ModelStatus.NO_PERMISSION:
            raise ModelCurrentlyNotSupportError(f"Dify Hosted OpenAI {model_name} currently not support.")
        elif provider_model.status == ModelStatus.QUOTA_EXCEEDED:
            raise QuotaExceededError(f"Model provider {provider_name} quota exceeded.")

        # model config
        completion_params = node_data_model.completion_params
        stop = []
        if "stop" in completion_params:
            stop = completion_params["stop"]
            del completion_params["stop"]

        # get model mode
        model_mode = node_data_model.mode
        if not model_mode:
            raise ValueError("LLM mode is required.")

        model_schema = model_type_instance.get_model_schema(model_name, model_credentials)

        if not model_schema:
            raise ValueError(f"Model {model_name} not exist.")

        return model_instance, ModelConfigWithCredentialsEntity(
            provider=provider_name,
            model=model_name,
            model_schema=model_schema,
            mode=model_mode,
            provider_model_bundle=provider_model_bundle,
            credentials=model_credentials,
            parameters=completion_params,
            stop=stop,
        )

    def _fetch_memory(
        self, node_data_memory: Optional[MemoryConfig], model_instance: ModelInstance
    ) -> Optional[TokenBufferMemory]:
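        """Build a TokenBufferMemory for the current conversation, or return None when
        memory is disabled or no conversation is available."""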
        if not node_data_memory:
            return None

        # get conversation id
        conversation_id = self.graph_runtime_state.variable_pool.get_any(
            ["sys", SystemVariableKey.CONVERSATION_ID.value]
        )
        if conversation_id is None:
            return None

        # get conversation
        conversation = (
            db.session.query(Conversation)
            .filter(Conversation.app_id == self.app_id, Conversation.id == conversation_id)
            .first()
        )

        if not conversation:
            return None

        memory = TokenBufferMemory(conversation=conversation, model_instance=model_instance)

        return memory

    def _fetch_prompt_messages(
        self,
        *,
        system_query: str | None = None,
        inputs: dict[str, str] | None = None,
        files: Sequence["File"],
        context: str | None = None,
        memory: TokenBufferMemory | None = None,
        model_config: ModelConfigWithCredentialsEntity,
        prompt_template: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate,
        memory_config: MemoryConfig | None = None,
        vision_detail: ImagePromptMessageContent.DETAIL,
    ) -> tuple[list[PromptMessage], Optional[list[str]]]:
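        """Render the prompt template into prompt messages, apply the node's vision
        detail to image content, drop empty messages, and return the messages together
        with the configured stop sequences."""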
        inputs = inputs or {}

        prompt_transform = AdvancedPromptTransform(with_variable_tmpl=True)
        prompt_messages = prompt_transform.get_prompt(
            prompt_template=prompt_template,
            inputs=inputs,
            query=system_query or "",
            files=files,
            context=context,
            memory_config=memory_config,
            memory=memory,
            model_config=model_config,
        )
        stop = model_config.stop

        filtered_prompt_messages = []
        for prompt_message in prompt_messages:
            if prompt_message.is_empty():
                continue

            if not isinstance(prompt_message.content, str):
                prompt_message_content = []
                for content_item in prompt_message.content or []:
                    if isinstance(content_item, ImagePromptMessageContent):
                        # Override vision config if LLM node has vision config,
                        # because vision detail is related to the configuration from the FileUpload feature.
                        content_item.detail = vision_detail
                        prompt_message_content.append(content_item)
                    elif isinstance(content_item, TextPromptMessageContent | AudioPromptMessageContent):
                        prompt_message_content.append(content_item)

                if len(prompt_message_content) > 1:
                    prompt_message.content = prompt_message_content
                elif (
                    len(prompt_message_content) == 1
                    and prompt_message_content[0].type == PromptMessageContentType.TEXT
                ):
                    prompt_message.content = prompt_message_content[0].data

            filtered_prompt_messages.append(prompt_message)

        if not filtered_prompt_messages:
            raise ValueError(
                "No prompt found in the LLM configuration. "
                "Please ensure a prompt is properly configured before proceeding."
            )

        return filtered_prompt_messages, stop

    @classmethod
    def deduct_llm_quota(cls, tenant_id: str, model_instance: ModelInstance, usage: LLMUsage) -> None:
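        """Charge the hosted (system) provider quota for this invocation: token-based
        quotas use the total token count, credit-based quotas use 1 credit per call
        (20 for gpt-4 family models)."""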
        provider_model_bundle = model_instance.provider_model_bundle
        provider_configuration = provider_model_bundle.configuration

        if provider_configuration.using_provider_type != ProviderType.SYSTEM:
            return

        system_configuration = provider_configuration.system_configuration

        quota_unit = None
        for quota_configuration in system_configuration.quota_configurations:
            if quota_configuration.quota_type == system_configuration.current_quota_type:
                quota_unit = quota_configuration.quota_unit

                if quota_configuration.quota_limit == -1:
                    return

                break

        used_quota = None
        if quota_unit:
            if quota_unit == QuotaUnit.TOKENS:
                used_quota = usage.total_tokens
            elif quota_unit == QuotaUnit.CREDITS:
                used_quota = 1

                if "gpt-4" in model_instance.model:
                    used_quota = 20
            else:
                used_quota = 1

        if used_quota is not None and system_configuration.current_quota_type is not None:
            db.session.query(Provider).filter(
                Provider.tenant_id == tenant_id,
                Provider.provider_name == model_instance.provider,
                Provider.provider_type == ProviderType.SYSTEM.value,
                Provider.quota_type == system_configuration.current_quota_type.value,
                Provider.quota_limit > Provider.quota_used,
            ).update({"quota_used": Provider.quota_used + used_quota})
            db.session.commit()

    @classmethod
    def _extract_variable_selector_to_variable_mapping(
        cls,
        *,
        graph_config: Mapping[str, Any],
        node_id: str,
        node_data: LLMNodeData,
    ) -> Mapping[str, Sequence[str]]:
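        """Collect every variable selector the node depends on (prompt template, memory
        query template, context, vision files, sys.query and jinja2 variables), keyed by
        node_id-prefixed variable names."""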
        prompt_template = node_data.prompt_template

        variable_selectors = []
        if isinstance(prompt_template, list) and all(
            isinstance(prompt, LLMNodeChatModelMessage) for prompt in prompt_template
        ):
            for prompt in prompt_template:
                if prompt.edition_type != "jinja2":
                    variable_template_parser = VariableTemplateParser(template=prompt.text)
                    variable_selectors.extend(variable_template_parser.extract_variable_selectors())
        elif isinstance(prompt_template, LLMNodeCompletionModelPromptTemplate):
            if prompt_template.edition_type != "jinja2":
                variable_template_parser = VariableTemplateParser(template=prompt_template.text)
                variable_selectors = variable_template_parser.extract_variable_selectors()
        else:
            raise ValueError(f"Invalid prompt template type: {type(prompt_template)}")

        variable_mapping = {}
        for variable_selector in variable_selectors:
            variable_mapping[variable_selector.variable] = variable_selector.value_selector

        memory = node_data.memory
        if memory and memory.query_prompt_template:
            query_variable_selectors = VariableTemplateParser(
                template=memory.query_prompt_template
            ).extract_variable_selectors()
            for variable_selector in query_variable_selectors:
                variable_mapping[variable_selector.variable] = variable_selector.value_selector

        if node_data.context.enabled:
            variable_mapping["#context#"] = node_data.context.variable_selector

        if node_data.vision.enabled:
            variable_mapping["#files#"] = ["sys", SystemVariableKey.FILES.value]

        if node_data.memory:
            variable_mapping["#sys.query#"] = ["sys", SystemVariableKey.QUERY.value]

        if node_data.prompt_config:
            enable_jinja = False

            if isinstance(prompt_template, list):
                for prompt in prompt_template:
                    if prompt.edition_type == "jinja2":
                        enable_jinja = True
                        break
            else:
                if prompt_template.edition_type == "jinja2":
                    enable_jinja = True

            if enable_jinja:
                for variable_selector in node_data.prompt_config.jinja2_variables or []:
                    variable_mapping[variable_selector.variable] = variable_selector.value_selector

        variable_mapping = {node_id + "." + key: value for key, value in variable_mapping.items()}

        return variable_mapping

    @classmethod
    def get_default_config(cls, filters: Optional[dict] = None) -> dict:
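        """Default node configuration: a basic system prompt for chat models and a
        histories-wrapped prompt with stop words for completion models."""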
        return {
            "type": "llm",
            "config": {
                "prompt_templates": {
                    "chat_model": {
                        "prompts": [
                            {"role": "system", "text": "You are a helpful AI assistant.", "edition_type": "basic"}
                        ]
                    },
                    "completion_model": {
                        "conversation_histories_role": {"user_prefix": "Human", "assistant_prefix": "Assistant"},
                        "prompt": {
                            "text": "Here is the chat histories between human and assistant, inside "
                            "<histories></histories> XML tags.\n\n<histories>\n{{"
                            "#histories#}}\n</histories>\n\n\nHuman: {{#sys.query#}}\n\nAssistant:",
                            "edition_type": "basic",
                        },
                        "stop": ["Human:"],
                    },
                }
            },
        }