@@ -449,6 +449,11 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
             human_prompt_prefix = "\n[INST]"
             human_prompt_postfix = "[\\INST]\n"
             ai_prompt = ""
+
+        elif model_prefix == "mistral":
+            human_prompt_prefix = "<s>[INST]"
+            human_prompt_postfix = "[\\INST]\n"
+            ai_prompt = "\n\nAssistant:"
 
         elif model_prefix == "amazon":
             human_prompt_prefix = "\n\nUser:"
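The new `mistral` branch only sets the three prompt markers; the actual string assembly happens in `_convert_messages_to_prompt`, which is not part of this hunk. A minimal sketch of how those markers would shape the final prompt, assuming the converter simply wraps each human turn in the prefix/postfix pair (the helper below is a hypothetical stand-in, not the class's real method):

```python
# Hypothetical stand-in for _convert_messages_to_prompt, shown only to
# illustrate the mistral markers set in the diff above.
def mistral_prompt(messages: list[tuple[str, str]]) -> str:
    human_prompt_prefix = "<s>[INST]"
    human_prompt_postfix = "[\\INST]\n"  # escapes to a literal "[\INST]"; Mistral's documented closing tag is "[/INST]"
    ai_prompt = "\n\nAssistant:"

    parts = []
    for role, text in messages:
        if role == "user":
            parts.append(f"{human_prompt_prefix} {text} {human_prompt_postfix}")
        else:  # assistant turn
            parts.append(f"{ai_prompt} {text}")
    return "".join(parts)

print(mistral_prompt([("user", "Why is the sky blue?")]))
# <s>[INST] Why is the sky blue? [\INST]
```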
@@ -519,6 +524,13 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
                 payload["frequencyPenalty"] = {model_parameters.get("frequencyPenalty")}
             if model_parameters.get("countPenalty"):
                 payload["countPenalty"] = {model_parameters.get("countPenalty")}
+
+        elif model_prefix == "mistral":
+            payload["temperature"] = model_parameters.get("temperature")
+            payload["top_p"] = model_parameters.get("top_p")
+            payload["max_tokens"] = model_parameters.get("max_tokens")
+            payload["prompt"] = self._convert_messages_to_prompt(prompt_messages, model_prefix)
+            payload["stop"] = stop[:10] if stop else []
 
         elif model_prefix == "anthropic":
            payload = { **model_parameters }
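For Mistral models, Bedrock expects a flat JSON body with `prompt`, `max_tokens`, `temperature`, `top_p`, and an optional `stop` list; the `stop[:10]` slice presumably matches the schema's cap of ten stop sequences. A minimal sketch of invoking a model with a payload shaped like the branch above builds (the model ID and sampling values are illustrative examples, not taken from this PR):

```python
import json

import boto3  # assumes AWS credentials and Bedrock model access are configured

client = boto3.client("bedrock-runtime", region_name="us-east-1")

# Example payload mirroring the mistral branch above.
payload = {
    "prompt": "<s>[INST] Why is the sky blue? [\\INST]\n",
    "max_tokens": 256,
    "temperature": 0.5,
    "top_p": 0.9,
    "stop": [],  # at most 10 stop sequences, hence stop[:10] in the diff
}

response = client.invoke_model(
    modelId="mistral.mistral-7b-instruct-v0:2",  # example model ID
    body=json.dumps(payload),
)
response_body = json.loads(response["body"].read())
print(response_body["outputs"][0]["text"])
```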
@@ -648,6 +660,11 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
             output = response_body.get("generation").strip('\n')
             prompt_tokens = response_body.get("prompt_token_count")
             completion_tokens = response_body.get("generation_token_count")
+
+        elif model_prefix == "mistral":
+            output = response_body.get("outputs")[0].get("text")
+            prompt_tokens = response.get('ResponseMetadata').get('HTTPHeaders').get('x-amzn-bedrock-input-token-count')
+            completion_tokens = response.get('ResponseMetadata').get('HTTPHeaders').get('x-amzn-bedrock-output-token-count')
 
         else:
             raise ValueError(f"Got unknown model prefix {model_prefix} when handling block response")
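Unlike the meta branch, Mistral's response body carries no token counts of its own, so this branch falls back to the per-request headers that botocore surfaces on every Bedrock call. Header values arrive as strings; a sketch of the same lookup with explicit casts, assuming `response` is the raw `invoke_model` result:

```python
# Token counts live in botocore's HTTP-header metadata for the request;
# the raw header values are strings, hence the int() casts.
headers = response["ResponseMetadata"]["HTTPHeaders"]
prompt_tokens = int(headers["x-amzn-bedrock-input-token-count"])
completion_tokens = int(headers["x-amzn-bedrock-output-token-count"])
```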
@@ -731,6 +748,10 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
                 content_delta = payload.get("text")
                 finish_reason = payload.get("finish_reason")
 
+            elif model_prefix == "mistral":
+                content_delta = payload.get('outputs')[0].get("text")
+                finish_reason = payload.get('outputs')[0].get("stop_reason")
+
             elif model_prefix == "meta":
                 content_delta = payload.get("generation").strip('\n')
                 finish_reason = payload.get("stop_reason")
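In the streaming path, each decoded chunk has the same `outputs` shape as the block response, with `stop_reason` typically populated only on the final chunk. A minimal sketch of consuming that stream outside the class (model ID and prompt are examples):

```python
import json

import boto3  # assumes AWS credentials and Bedrock model access are configured

client = boto3.client("bedrock-runtime", region_name="us-east-1")
stream = client.invoke_model_with_response_stream(
    modelId="mistral.mistral-7b-instruct-v0:2",  # example model ID
    body=json.dumps({"prompt": "<s>[INST] Hello [\\INST]\n", "max_tokens": 64}),
)

for event in stream["body"]:
    payload = json.loads(event["chunk"]["bytes"])
    content_delta = payload["outputs"][0]["text"]
    finish_reason = payload["outputs"][0].get("stop_reason")  # None until the final chunk
    print(content_delta, end="", flush=True)
```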