node.py

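"""LLM workflow node.

Renders the configured prompt template, invokes the selected model through
ModelManager, streams the generated text back as node events, and deducts the
hosted-provider quota from the reported usage.
"""
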
import json
from collections.abc import Generator, Mapping, Sequence
from typing import TYPE_CHECKING, Any, Optional, cast

from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.entities.model_entities import ModelStatus
from core.entities.provider_entities import QuotaUnit
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance, ModelManager
from core.model_runtime.entities import (
    AudioPromptMessageContent,
    ImagePromptMessageContent,
    PromptMessage,
    PromptMessageContentType,
    TextPromptMessageContent,
    VideoPromptMessageContent,
)
from core.model_runtime.entities.llm_entities import LLMResult, LLMUsage
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.utils.encoders import jsonable_encoder
from core.prompt.advanced_prompt_transform import AdvancedPromptTransform
from core.prompt.entities.advanced_prompt_entities import CompletionModelPromptTemplate, MemoryConfig
from core.prompt.utils.prompt_message_util import PromptMessageUtil
from core.variables import (
    ArrayAnySegment,
    ArrayFileSegment,
    ArraySegment,
    FileSegment,
    NoneSegment,
    ObjectSegment,
    StringSegment,
)
from core.workflow.constants import SYSTEM_VARIABLE_NODE_ID
from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.event import InNodeEvent
from core.workflow.nodes.base import BaseNode
from core.workflow.nodes.enums import NodeType
from core.workflow.nodes.event import (
    ModelInvokeCompletedEvent,
    NodeEvent,
    RunCompletedEvent,
    RunRetrieverResourceEvent,
    RunStreamChunkEvent,
)
from core.workflow.utils.variable_template_parser import VariableTemplateParser
from extensions.ext_database import db
from models.model import Conversation
from models.provider import Provider, ProviderType
from models.workflow import WorkflowNodeExecutionStatus

from .entities import (
    LLMNodeChatModelMessage,
    LLMNodeCompletionModelPromptTemplate,
    LLMNodeData,
    ModelConfig,
)
from .exc import (
    InvalidContextStructureError,
    InvalidVariableTypeError,
    LLMModeRequiredError,
    LLMNodeError,
    ModelNotExistError,
    NoPromptFoundError,
    VariableNotFoundError,
)

if TYPE_CHECKING:
    from core.file.models import File


class LLMNode(BaseNode[LLMNodeData]):
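    """Workflow node that renders prompt templates, invokes a large language
    model, and streams the generated text back as node events."""
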
    _node_data_cls = LLMNodeData
    _node_type = NodeType.LLM

    def _run(self) -> NodeRunResult | Generator[NodeEvent | InNodeEvent, None, None]:
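        """Run the node: resolve inputs, files, context, model config, and
        memory, invoke the LLM, and yield streaming and completion events."""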
        node_inputs = None
        process_data = None

        try:
            # init messages template
            self.node_data.prompt_template = self._transform_chat_messages(self.node_data.prompt_template)

            # fetch variables and fetch values from variable pool
            inputs = self._fetch_inputs(node_data=self.node_data)

            # fetch jinja2 inputs
            jinja_inputs = self._fetch_jinja_inputs(node_data=self.node_data)

            # merge inputs
            inputs.update(jinja_inputs)

            node_inputs = {}

            # fetch files
            files = (
                self._fetch_files(selector=self.node_data.vision.configs.variable_selector)
                if self.node_data.vision.enabled
                else []
            )

            if files:
                node_inputs["#files#"] = [file.to_dict() for file in files]

            # fetch context value
            generator = self._fetch_context(node_data=self.node_data)
            context = None
            for event in generator:
                if isinstance(event, RunRetrieverResourceEvent):
                    context = event.context
                    yield event

            if context:
                node_inputs["#context#"] = context

            # fetch model config
            model_instance, model_config = self._fetch_model_config(self.node_data.model)

            # fetch memory
            memory = self._fetch_memory(node_data_memory=self.node_data.memory, model_instance=model_instance)

            # fetch prompt messages
            if self.node_data.memory:
                query = self.graph_runtime_state.variable_pool.get((SYSTEM_VARIABLE_NODE_ID, SystemVariableKey.QUERY))
                if not query:
                    raise VariableNotFoundError("Query not found")
                query = query.text
            else:
                query = None

            prompt_messages, stop = self._fetch_prompt_messages(
                system_query=query,
                inputs=inputs,
                files=files,
                context=context,
                memory=memory,
                model_config=model_config,
                prompt_template=self.node_data.prompt_template,
                memory_config=self.node_data.memory,
                vision_enabled=self.node_data.vision.enabled,
                vision_detail=self.node_data.vision.configs.detail,
            )

            process_data = {
                "model_mode": model_config.mode,
                "prompts": PromptMessageUtil.prompt_messages_to_prompt_for_saving(
                    model_mode=model_config.mode, prompt_messages=prompt_messages
                ),
                "model_provider": model_config.provider,
                "model_name": model_config.model,
            }

            # handle invoke result
            generator = self._invoke_llm(
                node_data_model=self.node_data.model,
                model_instance=model_instance,
                prompt_messages=prompt_messages,
                stop=stop,
            )

            result_text = ""
            usage = LLMUsage.empty_usage()
            finish_reason = None
            for event in generator:
                if isinstance(event, RunStreamChunkEvent):
                    yield event
                elif isinstance(event, ModelInvokeCompletedEvent):
                    result_text = event.text
                    usage = event.usage
                    finish_reason = event.finish_reason
                    break
        except LLMNodeError as e:
            yield RunCompletedEvent(
                run_result=NodeRunResult(
                    status=WorkflowNodeExecutionStatus.FAILED,
                    error=str(e),
                    inputs=node_inputs,
                    process_data=process_data,
                )
            )
            return

        outputs = {"text": result_text, "usage": jsonable_encoder(usage), "finish_reason": finish_reason}

        yield RunCompletedEvent(
            run_result=NodeRunResult(
                status=WorkflowNodeExecutionStatus.SUCCEEDED,
                inputs=node_inputs,
                process_data=process_data,
                outputs=outputs,
                metadata={
                    NodeRunMetadataKey.TOTAL_TOKENS: usage.total_tokens,
                    NodeRunMetadataKey.TOTAL_PRICE: usage.total_price,
                    NodeRunMetadataKey.CURRENCY: usage.currency,
                },
                llm_usage=usage,
            )
        )

    def _invoke_llm(
        self,
        node_data_model: ModelConfig,
        model_instance: ModelInstance,
        prompt_messages: list[PromptMessage],
        stop: Optional[list[str]] = None,
    ) -> Generator[NodeEvent, None, None]:
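        """Invoke the model in streaming mode, relay the resulting events,
        and deduct the tenant's quota from the reported usage."""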
        # close the db session so the connection is not held during the model call
        db.session.close()

        invoke_result = model_instance.invoke_llm(
            prompt_messages=prompt_messages,
            model_parameters=node_data_model.completion_params,
            stop=stop,
            stream=True,
            user=self.user_id,
        )

        # handle invoke result
        generator = self._handle_invoke_result(invoke_result=invoke_result)

        usage = LLMUsage.empty_usage()
        for event in generator:
            yield event
            if isinstance(event, ModelInvokeCompletedEvent):
                usage = event.usage

        # deduct quota
        self.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)

    def _handle_invoke_result(self, invoke_result: LLMResult | Generator) -> Generator[NodeEvent, None, None]:
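        """Consume a streaming LLM result, yielding a chunk event per delta and
        a final completion event with the full text, usage, and finish reason."""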
        # blocking (non-streaming) results are not expected here, since the model
        # is always invoked with stream=True
        if isinstance(invoke_result, LLMResult):
            return

        model = None
        prompt_messages: list[PromptMessage] = []
        full_text = ""
        usage = None
        finish_reason = None
        for result in invoke_result:
            text = result.delta.message.content
            full_text += text

            yield RunStreamChunkEvent(chunk_content=text, from_variable_selector=[self.node_id, "text"])

            if not model:
                model = result.model

            if not prompt_messages:
                prompt_messages = result.prompt_messages

            if not usage and result.delta.usage:
                usage = result.delta.usage

            if not finish_reason and result.delta.finish_reason:
                finish_reason = result.delta.finish_reason

        if not usage:
            usage = LLMUsage.empty_usage()

        yield ModelInvokeCompletedEvent(text=full_text, usage=usage, finish_reason=finish_reason)

    def _transform_chat_messages(
        self, messages: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate, /
    ) -> Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate:
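        """For jinja2-edited templates, promote the jinja2 text into the plain
        text field so downstream rendering sees a single template source."""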
        if isinstance(messages, LLMNodeCompletionModelPromptTemplate):
            if messages.edition_type == "jinja2" and messages.jinja2_text:
                messages.text = messages.jinja2_text

            return messages

        for message in messages:
            if message.edition_type == "jinja2" and message.jinja2_text:
                message.text = message.jinja2_text

        return messages

    def _fetch_jinja_inputs(self, node_data: LLMNodeData) -> dict[str, str]:
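        """Resolve the jinja2 template variables from the variable pool and
        render each one to a string."""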
        variables = {}
        if not node_data.prompt_config:
            return variables

        for variable_selector in node_data.prompt_config.jinja2_variables or []:
            variable_name = variable_selector.variable
            variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
            if variable is None:
                raise VariableNotFoundError(f"Variable {variable_selector.variable} not found")

            def parse_dict(input_dict: Mapping[str, Any]) -> str:
                """
                Parse dict into string
                """
                # check if it's a context structure
                if "metadata" in input_dict and "_source" in input_dict["metadata"] and "content" in input_dict:
                    return input_dict["content"]

                # else, parse the dict
                try:
                    return json.dumps(input_dict, ensure_ascii=False)
                except Exception:
                    return str(input_dict)

            if isinstance(variable, ArraySegment):
                result = ""
                for item in variable.value:
                    if isinstance(item, dict):
                        result += parse_dict(item)
                    else:
                        result += str(item)
                    result += "\n"
                value = result.strip()
            elif isinstance(variable, ObjectSegment):
                value = parse_dict(variable.value)
            else:
                value = variable.text

            variables[variable_name] = value

        return variables

    def _fetch_inputs(self, node_data: LLMNodeData) -> dict[str, Any]:
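        """Extract the variable selectors referenced by the prompt template
        (and the memory query template) and resolve their values."""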
        inputs = {}
        prompt_template = node_data.prompt_template

        variable_selectors = []
        if isinstance(prompt_template, list):
            for prompt in prompt_template:
                variable_template_parser = VariableTemplateParser(template=prompt.text)
                variable_selectors.extend(variable_template_parser.extract_variable_selectors())
        elif isinstance(prompt_template, CompletionModelPromptTemplate):
            variable_template_parser = VariableTemplateParser(template=prompt_template.text)
            variable_selectors = variable_template_parser.extract_variable_selectors()

        for variable_selector in variable_selectors:
            variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
            if variable is None:
                raise VariableNotFoundError(f"Variable {variable_selector.variable} not found")
            if isinstance(variable, NoneSegment):
                inputs[variable_selector.variable] = ""
            else:
                inputs[variable_selector.variable] = variable.to_object()

        memory = node_data.memory
        if memory and memory.query_prompt_template:
            query_variable_selectors = VariableTemplateParser(
                template=memory.query_prompt_template
            ).extract_variable_selectors()
            for variable_selector in query_variable_selectors:
                variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
                if variable is None:
                    raise VariableNotFoundError(f"Variable {variable_selector.variable} not found")
                if isinstance(variable, NoneSegment):
                    continue
                inputs[variable_selector.variable] = variable.to_object()

        return inputs

    def _fetch_files(self, *, selector: Sequence[str]) -> Sequence["File"]:
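        """Resolve the file variable behind *selector*, normalizing single
        files, file arrays, and empty values to a sequence of files."""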
        variable = self.graph_runtime_state.variable_pool.get(selector)
        if variable is None:
            return []
        elif isinstance(variable, FileSegment):
            return [variable.value]
        elif isinstance(variable, ArrayFileSegment):
            return variable.value
        elif isinstance(variable, NoneSegment | ArrayAnySegment):
            return []
        raise InvalidVariableTypeError(f"Invalid variable type: {type(variable)}")

    def _fetch_context(self, node_data: LLMNodeData):
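        """Yield a retriever-resource event carrying the context string, built
        from either a plain string variable or an array of context items."""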
        if not node_data.context.enabled:
            return

        if not node_data.context.variable_selector:
            return

        context_value_variable = self.graph_runtime_state.variable_pool.get(node_data.context.variable_selector)
        if context_value_variable:
            if isinstance(context_value_variable, StringSegment):
                yield RunRetrieverResourceEvent(retriever_resources=[], context=context_value_variable.value)
            elif isinstance(context_value_variable, ArraySegment):
                context_str = ""
                original_retriever_resource = []
                for item in context_value_variable.value:
                    if isinstance(item, str):
                        context_str += item + "\n"
                    else:
                        if "content" not in item:
                            raise InvalidContextStructureError(f"Invalid context structure: {item}")

                        context_str += item["content"] + "\n"

                        retriever_resource = self._convert_to_original_retriever_resource(item)
                        if retriever_resource:
                            original_retriever_resource.append(retriever_resource)

                yield RunRetrieverResourceEvent(
                    retriever_resources=original_retriever_resource, context=context_str.strip()
                )

    def _convert_to_original_retriever_resource(self, context_dict: dict) -> Optional[dict]:
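        """Map a knowledge-retrieval context item back to the original
        retriever-resource shape, or return None for non-knowledge items."""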
        if (
            "metadata" in context_dict
            and "_source" in context_dict["metadata"]
            and context_dict["metadata"]["_source"] == "knowledge"
        ):
            metadata = context_dict.get("metadata", {})

            source = {
                "position": metadata.get("position"),
                "dataset_id": metadata.get("dataset_id"),
                "dataset_name": metadata.get("dataset_name"),
                "document_id": metadata.get("document_id"),
                "document_name": metadata.get("document_name"),
                "data_source_type": metadata.get("document_data_source_type"),
                "segment_id": metadata.get("segment_id"),
                "retriever_from": metadata.get("retriever_from"),
                "score": metadata.get("score"),
                "hit_count": metadata.get("segment_hit_count"),
                "word_count": metadata.get("segment_word_count"),
                "segment_position": metadata.get("segment_position"),
                "index_node_hash": metadata.get("segment_index_node_hash"),
                "content": context_dict.get("content"),
                "page": metadata.get("page"),
            }

            return source

        return None

    def _fetch_model_config(
        self, node_data_model: ModelConfig
    ) -> tuple[ModelInstance, ModelConfigWithCredentialsEntity]:
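        """Resolve the model instance for the configured provider/model, check
        its availability, and assemble the model config with credentials."""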
        model_name = node_data_model.name
        provider_name = node_data_model.provider

        model_manager = ModelManager()
        model_instance = model_manager.get_model_instance(
            tenant_id=self.tenant_id, model_type=ModelType.LLM, provider=provider_name, model=model_name
        )

        provider_model_bundle = model_instance.provider_model_bundle
        model_type_instance = model_instance.model_type_instance
        model_type_instance = cast(LargeLanguageModel, model_type_instance)

        model_credentials = model_instance.credentials

        # check model
        provider_model = provider_model_bundle.configuration.get_provider_model(
            model=model_name, model_type=ModelType.LLM
        )

        if provider_model is None:
            raise ModelNotExistError(f"Model {model_name} does not exist.")

        if provider_model.status == ModelStatus.NO_CONFIGURE:
            raise ProviderTokenNotInitError(f"Model {model_name} credentials are not initialized.")
        elif provider_model.status == ModelStatus.NO_PERMISSION:
            raise ModelCurrentlyNotSupportError(f"Dify Hosted OpenAI {model_name} is currently not supported.")
        elif provider_model.status == ModelStatus.QUOTA_EXCEEDED:
            raise QuotaExceededError(f"Model provider {provider_name} quota exceeded.")

        # model config
        completion_params = node_data_model.completion_params
        stop = []
        if "stop" in completion_params:
            stop = completion_params["stop"]
            del completion_params["stop"]

        # get model mode
        model_mode = node_data_model.mode
        if not model_mode:
            raise LLMModeRequiredError("LLM mode is required.")

        model_schema = model_type_instance.get_model_schema(model_name, model_credentials)
        if not model_schema:
            raise ModelNotExistError(f"Model {model_name} does not exist.")

        return model_instance, ModelConfigWithCredentialsEntity(
            provider=provider_name,
            model=model_name,
            model_schema=model_schema,
            mode=model_mode,
            provider_model_bundle=provider_model_bundle,
            credentials=model_credentials,
            parameters=completion_params,
            stop=stop,
        )

    def _fetch_memory(
        self, node_data_memory: Optional[MemoryConfig], model_instance: ModelInstance
    ) -> Optional[TokenBufferMemory]:
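        """Build a token-buffer memory from the current conversation, or return
        None when memory is disabled or no conversation is available."""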
        if not node_data_memory:
            return None

        # get conversation id
        conversation_id_variable = self.graph_runtime_state.variable_pool.get(
            ["sys", SystemVariableKey.CONVERSATION_ID.value]
        )
        if not isinstance(conversation_id_variable, StringSegment):
            return None
        conversation_id = conversation_id_variable.value

        # get conversation
        conversation = (
            db.session.query(Conversation)
            .filter(Conversation.app_id == self.app_id, Conversation.id == conversation_id)
            .first()
        )

        if not conversation:
            return None

        memory = TokenBufferMemory(conversation=conversation, model_instance=model_instance)

        return memory

    def _fetch_prompt_messages(
        self,
        *,
        system_query: str | None = None,
        inputs: dict[str, str] | None = None,
        files: Sequence["File"],
        context: str | None = None,
        memory: TokenBufferMemory | None = None,
        model_config: ModelConfigWithCredentialsEntity,
        prompt_template: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate,
        memory_config: MemoryConfig | None = None,
        vision_enabled: bool = False,
        vision_detail: ImagePromptMessageContent.DETAIL,
    ) -> tuple[list[PromptMessage], Optional[list[str]]]:
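        """Render the prompt template into prompt messages, drop empty messages
        and unsupported content, and return them with the stop words."""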
        inputs = inputs or {}

        prompt_transform = AdvancedPromptTransform(with_variable_tmpl=True)
        prompt_messages = prompt_transform.get_prompt(
            prompt_template=prompt_template,
            inputs=inputs,
            query=system_query or "",
            files=files,
            context=context,
            memory_config=memory_config,
            memory=memory,
            model_config=model_config,
        )
        stop = model_config.stop

        filtered_prompt_messages = []
        for prompt_message in prompt_messages:
            if prompt_message.is_empty():
                continue

            if not isinstance(prompt_message.content, str):
                prompt_message_content = []
                for content_item in prompt_message.content or []:
                    # skip image content if vision is disabled
                    if not vision_enabled and content_item.type == PromptMessageContentType.IMAGE:
                        continue

                    if isinstance(content_item, ImagePromptMessageContent):
                        # Override the vision config with the LLM node's own setting,
                        # because the detail otherwise comes from the FileUpload feature configuration.
                        content_item.detail = vision_detail
                        prompt_message_content.append(content_item)
                    elif isinstance(
                        content_item, TextPromptMessageContent | AudioPromptMessageContent | VideoPromptMessageContent
                    ):
                        prompt_message_content.append(content_item)

                if len(prompt_message_content) > 1:
                    prompt_message.content = prompt_message_content
                elif (
                    len(prompt_message_content) == 1 and prompt_message_content[0].type == PromptMessageContentType.TEXT
                ):
                    prompt_message.content = prompt_message_content[0].data

            filtered_prompt_messages.append(prompt_message)

        if not filtered_prompt_messages:
            raise NoPromptFoundError(
                "No prompt found in the LLM configuration. "
                "Please ensure a prompt is properly configured before proceeding."
            )

        return filtered_prompt_messages, stop

    @classmethod
    def deduct_llm_quota(cls, tenant_id: str, model_instance: ModelInstance, usage: LLMUsage) -> None:
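        """Deduct the tenant's system-provider quota according to the configured
        quota unit and the reported usage."""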
        provider_model_bundle = model_instance.provider_model_bundle
        provider_configuration = provider_model_bundle.configuration

        if provider_configuration.using_provider_type != ProviderType.SYSTEM:
            return

        system_configuration = provider_configuration.system_configuration

        quota_unit = None
        for quota_configuration in system_configuration.quota_configurations:
            if quota_configuration.quota_type == system_configuration.current_quota_type:
                quota_unit = quota_configuration.quota_unit

                if quota_configuration.quota_limit == -1:
                    return

                break

        used_quota = None
        if quota_unit:
            if quota_unit == QuotaUnit.TOKENS:
                used_quota = usage.total_tokens
            elif quota_unit == QuotaUnit.CREDITS:
                used_quota = 1

                if "gpt-4" in model_instance.model:
                    used_quota = 20
            else:
                used_quota = 1

        if used_quota is not None and system_configuration.current_quota_type is not None:
            db.session.query(Provider).filter(
                Provider.tenant_id == tenant_id,
                Provider.provider_name == model_instance.provider,
                Provider.provider_type == ProviderType.SYSTEM.value,
                Provider.quota_type == system_configuration.current_quota_type.value,
                Provider.quota_limit > Provider.quota_used,
            ).update({"quota_used": Provider.quota_used + used_quota})
            db.session.commit()

    @classmethod
    def _extract_variable_selector_to_variable_mapping(
        cls,
        *,
        graph_config: Mapping[str, Any],
        node_id: str,
        node_data: LLMNodeData,
    ) -> Mapping[str, Sequence[str]]:
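        """Collect every variable selector the node depends on (prompt, memory
        query, context, vision files, and jinja2 variables), keyed by node id."""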
        prompt_template = node_data.prompt_template

        variable_selectors = []
        if isinstance(prompt_template, list) and all(
            isinstance(prompt, LLMNodeChatModelMessage) for prompt in prompt_template
        ):
            for prompt in prompt_template:
                if prompt.edition_type != "jinja2":
                    variable_template_parser = VariableTemplateParser(template=prompt.text)
                    variable_selectors.extend(variable_template_parser.extract_variable_selectors())
        elif isinstance(prompt_template, LLMNodeCompletionModelPromptTemplate):
            if prompt_template.edition_type != "jinja2":
                variable_template_parser = VariableTemplateParser(template=prompt_template.text)
                variable_selectors = variable_template_parser.extract_variable_selectors()
        else:
            raise InvalidVariableTypeError(f"Invalid prompt template type: {type(prompt_template)}")

        variable_mapping = {}
        for variable_selector in variable_selectors:
            variable_mapping[variable_selector.variable] = variable_selector.value_selector

        memory = node_data.memory
        if memory and memory.query_prompt_template:
            query_variable_selectors = VariableTemplateParser(
                template=memory.query_prompt_template
            ).extract_variable_selectors()
            for variable_selector in query_variable_selectors:
                variable_mapping[variable_selector.variable] = variable_selector.value_selector

        if node_data.context.enabled:
            variable_mapping["#context#"] = node_data.context.variable_selector

        if node_data.vision.enabled:
            variable_mapping["#files#"] = ["sys", SystemVariableKey.FILES.value]

        if node_data.memory:
            variable_mapping["#sys.query#"] = ["sys", SystemVariableKey.QUERY.value]

        if node_data.prompt_config:
            enable_jinja = False

            if isinstance(prompt_template, list):
                for prompt in prompt_template:
                    if prompt.edition_type == "jinja2":
                        enable_jinja = True
                        break
            else:
                if prompt_template.edition_type == "jinja2":
                    enable_jinja = True

            if enable_jinja:
                for variable_selector in node_data.prompt_config.jinja2_variables or []:
                    variable_mapping[variable_selector.variable] = variable_selector.value_selector

        variable_mapping = {node_id + "." + key: value for key, value in variable_mapping.items()}

        return variable_mapping

    @classmethod
    def get_default_config(cls, filters: Optional[dict] = None) -> dict:
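        """Return the default node configuration for both chat-model and
        completion-model prompt templates."""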
        return {
            "type": "llm",
            "config": {
                "prompt_templates": {
                    "chat_model": {
                        "prompts": [
                            {"role": "system", "text": "You are a helpful AI assistant.", "edition_type": "basic"}
                        ]
                    },
                    "completion_model": {
                        "conversation_histories_role": {"user_prefix": "Human", "assistant_prefix": "Assistant"},
                        "prompt": {
                            "text": "Here is the chat histories between human and assistant, inside "
                            "<histories></histories> XML tags.\n\n<histories>\n{{"
                            "#histories#}}\n</histories>\n\n\nHuman: {{#sys.query#}}\n\nAssistant:",
                            "edition_type": "basic",
                        },
                        "stop": ["Human:"],
                    },
                }
            },
        }