Since Mistral supports a variety of date-stamped models, we explicitly list the most popular models but
allow any name in the type hints.
See the Mistral docs for a full list.
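For example, a minimal usage sketch (assuming a `MISTRAL_API_KEY` environment variable is set so the default provider can authenticate):

```python
from pydantic_ai import Agent
from pydantic_ai.models.mistral import MistralModel

# Any model name accepted by the Mistral API works here, including
# date-stamped variants not listed in the type hints.
model = MistralModel('mistral-large-latest')
agent = Agent(model)

result = agent.run_sync('What is the capital of France?')
print(result.output)  # on older pydantic-ai versions this attribute is `result.data`
```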
ALL FIELDS MUST BE `mistral_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
Source code in pydantic_ai_slim/pydantic_ai/models/mistral.py
```python
class MistralModelSettings(ModelSettings):
    """Settings used for a Mistral model request.

    ALL FIELDS MUST BE `mistral_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
    """
```
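As a usage sketch (hedged: this class currently adds no Mistral-only fields, so only base `ModelSettings` keys are shown; a hypothetical `mistral_`-prefixed key would coexist with other providers' prefixed keys in a shared settings dict):

```python
from pydantic_ai import Agent
from pydantic_ai.models.mistral import MistralModel, MistralModelSettings

agent = Agent(MistralModel('mistral-small-latest'))

# MistralModelSettings is a TypedDict, so the base ModelSettings keys are valid.
settings = MistralModelSettings(temperature=0.2, max_tokens=512)
result = agent.run_sync('Summarise the Mistral API in one sentence.', model_settings=settings)
```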
Source code in pydantic_ai_slim/pydantic_ai/models/mistral.py

```python
@dataclass(init=False)
class MistralModel(Model):
    """A model that uses Mistral.

    Internally, this uses the [Mistral Python client](https://github.com/mistralai/client-python) to interact with the API.

    [API Documentation](https://docs.mistral.ai/)
    """

    client: Mistral = field(repr=False)
    json_mode_schema_prompt: str = """Answer in JSON Object, respect the format:\n```\n{schema}\n```\n"""

    _model_name: MistralModelName = field(repr=False)
    _system: str = field(default='mistral_ai', repr=False)

    def __init__(
        self,
        model_name: MistralModelName,
        *,
        provider: Literal['mistral'] | Provider[Mistral] = 'mistral',
        json_mode_schema_prompt: str = """Answer in JSON Object, respect the format:\n```\n{schema}\n```\n""",
    ):
        """Initialize a Mistral model.

        Args:
            model_name: The name of the model to use.
            provider: The provider to use for authentication and API access. Can be either the string
                'mistral' or an instance of `Provider[Mistral]`. If not provided, a new provider will be
                created using the other parameters.
            json_mode_schema_prompt: The prompt to show when the model expects a JSON object as input.
        """
        self._model_name = model_name
        self.json_mode_schema_prompt = json_mode_schema_prompt

        if isinstance(provider, str):
            provider = infer_provider(provider)
        self.client = provider.client

    @property
    def base_url(self) -> str:
        return self.client.sdk_configuration.get_server_details()[0]

    async def request(
        self,
        messages: list[ModelMessage],
        model_settings: ModelSettings | None,
        model_request_parameters: ModelRequestParameters,
    ) -> tuple[ModelResponse, Usage]:
        """Make a non-streaming request to the model from a Pydantic AI call."""
        check_allow_model_requests()
        response = await self._completions_create(
            messages, cast(MistralModelSettings, model_settings or {}), model_request_parameters
        )
        return self._process_response(response), _map_usage(response)

    @asynccontextmanager
    async def request_stream(
        self,
        messages: list[ModelMessage],
        model_settings: ModelSettings | None,
        model_request_parameters: ModelRequestParameters,
    ) -> AsyncIterator[StreamedResponse]:
        """Make a streaming request to the model from a Pydantic AI call."""
        check_allow_model_requests()
        response = await self._stream_completions_create(
            messages, cast(MistralModelSettings, model_settings or {}), model_request_parameters
        )
        async with response:
            yield await self._process_streamed_response(model_request_parameters.output_tools, response)

    @property
    def model_name(self) -> MistralModelName:
        """The model name."""
        return self._model_name

    @property
    def system(self) -> str:
        """The system / model provider."""
        return self._system

    async def _completions_create(
        self,
        messages: list[ModelMessage],
        model_settings: MistralModelSettings,
        model_request_parameters: ModelRequestParameters,
    ) -> MistralChatCompletionResponse:
        """Make a non-streaming request to the model."""
        try:
            response = await self.client.chat.complete_async(
                model=str(self._model_name),
                messages=self._map_messages(messages),
                n=1,
                tools=self._map_function_and_output_tools_definition(model_request_parameters) or UNSET,
                tool_choice=self._get_tool_choice(model_request_parameters),
                stream=False,
                max_tokens=model_settings.get('max_tokens', UNSET),
                temperature=model_settings.get('temperature', UNSET),
                top_p=model_settings.get('top_p', 1),
                timeout_ms=self._get_timeout_ms(model_settings.get('timeout')),
                random_seed=model_settings.get('seed', UNSET),
                stop=model_settings.get('stop_sequences', None),
                http_headers={'User-Agent': get_user_agent()},
            )
        except SDKError as e:
            if (status_code := e.status_code) >= 400:
                raise ModelHTTPError(status_code=status_code, model_name=self.model_name, body=e.body) from e
            raise

        assert response, 'An unexpected empty response from Mistral.'
        return response

    async def _stream_completions_create(
        self,
        messages: list[ModelMessage],
        model_settings: MistralModelSettings,
        model_request_parameters: ModelRequestParameters,
    ) -> MistralEventStreamAsync[MistralCompletionEvent]:
        """Create a streaming completion request to the Mistral model."""
        response: MistralEventStreamAsync[MistralCompletionEvent] | None
        mistral_messages = self._map_messages(messages)

        if (
            model_request_parameters.output_tools
            and model_request_parameters.function_tools
            or model_request_parameters.function_tools
        ):
            # Function Calling
            response = await self.client.chat.stream_async(
                model=str(self._model_name),
                messages=mistral_messages,
                n=1,
                tools=self._map_function_and_output_tools_definition(model_request_parameters) or UNSET,
                tool_choice=self._get_tool_choice(model_request_parameters),
                temperature=model_settings.get('temperature', UNSET),
                top_p=model_settings.get('top_p', 1),
                max_tokens=model_settings.get('max_tokens', UNSET),
                timeout_ms=self._get_timeout_ms(model_settings.get('timeout')),
                presence_penalty=model_settings.get('presence_penalty'),
                frequency_penalty=model_settings.get('frequency_penalty'),
                stop=model_settings.get('stop_sequences', None),
                http_headers={'User-Agent': get_user_agent()},
            )

        elif model_request_parameters.output_tools:
            # JSON Mode
            parameters_json_schemas = [tool.parameters_json_schema for tool in model_request_parameters.output_tools]
            user_output_format_message = self._generate_user_output_format(parameters_json_schemas)
            mistral_messages.append(user_output_format_message)

            response = await self.client.chat.stream_async(
                model=str(self._model_name),
                messages=mistral_messages,
                response_format={'type': 'json_object'},
                stream=True,
                http_headers={'User-Agent': get_user_agent()},
            )

        else:
            # Stream Mode
            response = await self.client.chat.stream_async(
                model=str(self._model_name),
                messages=mistral_messages,
                stream=True,
                http_headers={'User-Agent': get_user_agent()},
            )
        assert response, 'An unexpected empty response from Mistral.'
        return response

    def _get_tool_choice(self, model_request_parameters: ModelRequestParameters) -> MistralToolChoiceEnum | None:
        """Get tool choice for the model.

        - "auto": Default mode. Model decides if it uses the tool or not.
        - "any": Select any tool.
        - "none": Prevents tool use.
        - "required": Forces tool use.
        """
        if not model_request_parameters.function_tools and not model_request_parameters.output_tools:
            return None
        elif not model_request_parameters.allow_text_output:
            return 'required'
        else:
            return 'auto'

    def _map_function_and_output_tools_definition(
        self, model_request_parameters: ModelRequestParameters
    ) -> list[MistralTool] | None:
        """Map function and output tools to MistralTool format.

        Returns None if both function_tools and output_tools are empty.
        """
        all_tools: list[ToolDefinition] = (
            model_request_parameters.function_tools + model_request_parameters.output_tools
        )
        tools = [
            MistralTool(
                function=MistralFunction(name=r.name, parameters=r.parameters_json_schema, description=r.description)
            )
            for r in all_tools
        ]
        return tools if tools else None

    def _process_response(self, response: MistralChatCompletionResponse) -> ModelResponse:
        """Process a non-streamed response, and prepare a message to return."""
        assert response.choices, 'Unexpected empty response choice.'

        if response.created:
            timestamp = datetime.fromtimestamp(response.created, tz=timezone.utc)
        else:
            timestamp = _now_utc()

        choice = response.choices[0]
        content = choice.message.content
        tool_calls = choice.message.tool_calls

        parts: list[ModelResponsePart] = []
        if text := _map_content(content):
            parts.append(TextPart(content=text))

        if isinstance(tool_calls, list):
            for tool_call in tool_calls:
                tool = self._map_mistral_to_pydantic_tool_call(tool_call=tool_call)
                parts.append(tool)

        return ModelResponse(parts, model_name=response.model, timestamp=timestamp)

    async def _process_streamed_response(
        self,
        output_tools: list[ToolDefinition],
        response: MistralEventStreamAsync[MistralCompletionEvent],
    ) -> StreamedResponse:
        """Process a streamed response, and prepare a streaming response to return."""
        peekable_response = _utils.PeekableAsyncStream(response)
        first_chunk = await peekable_response.peek()
        if isinstance(first_chunk, _utils.Unset):
            raise UnexpectedModelBehavior('Streamed response ended without content or tool calls')

        if first_chunk.data.created:
            timestamp = datetime.fromtimestamp(first_chunk.data.created, tz=timezone.utc)
        else:
            timestamp = datetime.now(tz=timezone.utc)

        return MistralStreamedResponse(
            _response=peekable_response,
            _model_name=self._model_name,
            _timestamp=timestamp,
            _output_tools={c.name: c for c in output_tools},
        )

    @staticmethod
    def _map_mistral_to_pydantic_tool_call(tool_call: MistralToolCall) -> ToolCallPart:
        """Maps a MistralToolCall to a ToolCall."""
        tool_call_id = tool_call.id or _generate_tool_call_id()
        func_call = tool_call.function
        return ToolCallPart(func_call.name, func_call.arguments, tool_call_id)

    @staticmethod
    def _map_tool_call(t: ToolCallPart) -> MistralToolCall:
        """Maps a pydantic-ai ToolCall to a MistralToolCall."""
        return MistralToolCall(
            id=_utils.guard_tool_call_id(t=t),
            type='function',
            function=MistralFunctionCall(name=t.tool_name, arguments=t.args),
        )

    def _generate_user_output_format(self, schemas: list[dict[str, Any]]) -> MistralUserMessage:
        """Get a message with an example of the expected output format."""
        examples: list[dict[str, Any]] = []
        for schema in schemas:
            typed_dict_definition: dict[str, Any] = {}
            for key, value in schema.get('properties', {}).items():
                typed_dict_definition[key] = self._get_python_type(value)
            examples.append(typed_dict_definition)

        example_schema = examples[0] if len(examples) == 1 else examples
        return MistralUserMessage(content=self.json_mode_schema_prompt.format(schema=example_schema))

    @classmethod
    def _get_python_type(cls, value: dict[str, Any]) -> str:
        """Return a string representation of the Python type for a single JSON schema property.

        This function handles recursion for nested arrays/objects and `anyOf`.
        """
        # 1) Handle anyOf first, because it's a different schema structure
        if any_of := value.get('anyOf'):
            # Simplistic approach: pick the first option in anyOf
            # (In reality, you'd possibly want to merge or union types)
            return f'Optional[{cls._get_python_type(any_of[0])}]'

        # 2) If we have a top-level "type" field
        value_type = value.get('type')
        if not value_type:
            # No explicit type; fallback
            return 'Any'

        # 3) Direct simple type mapping (string, integer, float, bool, None)
        if value_type in SIMPLE_JSON_TYPE_MAPPING and value_type != 'array' and value_type != 'object':
            return SIMPLE_JSON_TYPE_MAPPING[value_type]

        # 4) Array: Recursively get the item type
        if value_type == 'array':
            items = value.get('items', {})
            return f'list[{cls._get_python_type(items)}]'

        # 5) Object: Check for additionalProperties
        if value_type == 'object':
            additional_properties = value.get('additionalProperties', {})
            if isinstance(additional_properties, bool):
                return 'bool'  # pragma: no cover
            additional_properties_type = additional_properties.get('type')
            if (
                additional_properties_type in SIMPLE_JSON_TYPE_MAPPING
                and additional_properties_type != 'array'
                and additional_properties_type != 'object'
            ):
                # dict[str, bool/int/float/etc...]
                return f'dict[str, {SIMPLE_JSON_TYPE_MAPPING[additional_properties_type]}]'
            elif additional_properties_type == 'array':
                array_items = additional_properties.get('items', {})
                return f'dict[str, list[{cls._get_python_type(array_items)}]]'
            elif additional_properties_type == 'object':
                # nested dictionary of unknown shape
                return 'dict[str, dict[str, Any]]'
            else:
                # If no additionalProperties type or something else, default to a generic dict
                return 'dict[str, Any]'

        # 6) Fallback
        return 'Any'

    @staticmethod
    def _get_timeout_ms(timeout: Timeout | float | None) -> int | None:
        """Convert a timeout to milliseconds."""
        if timeout is None:
            return None
        if isinstance(timeout, float):
            return int(1000 * timeout)
        raise NotImplementedError('Timeout object is not yet supported for MistralModel.')

    def _map_user_message(self, message: ModelRequest) -> Iterable[MistralMessages]:
        for part in message.parts:
            if isinstance(part, SystemPromptPart):
                yield MistralSystemMessage(content=part.content)
            elif isinstance(part, UserPromptPart):
                yield self._map_user_prompt(part)
            elif isinstance(part, ToolReturnPart):
                yield MistralToolMessage(
                    tool_call_id=part.tool_call_id,
                    content=part.model_response_str(),
                )
            elif isinstance(part, RetryPromptPart):
                if part.tool_name is None:
                    yield MistralUserMessage(content=part.model_response())
                else:
                    yield MistralToolMessage(tool_call_id=part.tool_call_id, content=part.model_response())
            else:
                assert_never(part)

    def _map_messages(self, messages: list[ModelMessage]) -> list[MistralMessages]:
        """Just maps a `pydantic_ai.Message` to a `MistralMessage`."""
        mistral_messages: list[MistralMessages] = []
        for message in messages:
            if isinstance(message, ModelRequest):
                mistral_messages.extend(self._map_user_message(message))
            elif isinstance(message, ModelResponse):
                content_chunks: list[MistralContentChunk] = []
                tool_calls: list[MistralToolCall] = []

                for part in message.parts:
                    if isinstance(part, TextPart):
                        content_chunks.append(MistralTextChunk(text=part.content))
                    elif isinstance(part, ToolCallPart):
                        tool_calls.append(self._map_tool_call(part))
                    else:
                        assert_never(part)
                mistral_messages.append(MistralAssistantMessage(content=content_chunks, tool_calls=tool_calls))
            else:
                assert_never(message)
        if instructions := self._get_instructions(messages):
            mistral_messages.insert(0, MistralSystemMessage(content=instructions))
        return mistral_messages

    def _map_user_prompt(self, part: UserPromptPart) -> MistralUserMessage:
        content: str | list[MistralContentChunk]
        if isinstance(part.content, str):
            content = part.content
        else:
            content = []
            for item in part.content:
                if isinstance(item, str):
                    content.append(MistralTextChunk(text=item))
                elif isinstance(item, ImageUrl):
                    content.append(MistralImageURLChunk(image_url=MistralImageURL(url=item.url)))
                elif isinstance(item, BinaryContent):
                    base64_encoded = base64.b64encode(item.data).decode('utf-8')
                    if item.is_image:
                        image_url = MistralImageURL(url=f'data:{item.media_type};base64,{base64_encoded}')
                        content.append(MistralImageURLChunk(image_url=image_url, type='image_url'))
                    else:
                        raise RuntimeError('Only image binary content is supported for Mistral.')
                elif isinstance(item, DocumentUrl):
                    raise RuntimeError('DocumentUrl is not supported in Mistral.')
                elif isinstance(item, VideoUrl):
                    raise RuntimeError('VideoUrl is not supported in Mistral.')
                else:  # pragma: no cover
                    raise RuntimeError(f'Unsupported content type: {type(item)}')
        return MistralUserMessage(content=content)
```
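To make the `_get_python_type` mapping concrete, here is an illustrative worked example (hedged: it assumes `SIMPLE_JSON_TYPE_MAPPING` maps `'string'` to `'str'`, `'number'` to `'float'`, and `'integer'` to `'int'`; the method is private, so this is for understanding only, not part of the public API):

```python
schema = {
    'type': 'object',
    'properties': {
        'name': {'type': 'string'},
        'scores': {'type': 'array', 'items': {'type': 'number'}},
        'meta': {'type': 'object', 'additionalProperties': {'type': 'integer'}},
        'nickname': {'anyOf': [{'type': 'string'}, {'type': 'null'}]},
    },
}

# Following steps 1-6 above, each property maps to a Python type string:
#   name     -> 'str'            (step 3, simple type)
#   scores   -> 'list[float]'    (step 4, recurse into items)
#   meta     -> 'dict[str, int]' (step 5, additionalProperties)
#   nickname -> 'Optional[str]'  (step 1, first anyOf option)
# so the example embedded in the JSON-mode prompt would be:
# {'name': 'str', 'scores': 'list[float]', 'meta': 'dict[str, int]', 'nickname': 'Optional[str]'}
```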
__init__
```python
__init__(
    model_name: MistralModelName,
    *,
    provider: Literal['mistral'] | Provider[Mistral] = 'mistral',
    json_mode_schema_prompt: str = 'Answer in JSON Object, respect the format:\n```\n{schema}\n```\n',
)
```
The provider to use for authentication and API access. Can be either the string
'mistral' or an instance of Provider[Mistral]. If not provided, a new provider will be
created using the other parameters.
Source code in pydantic_ai_slim/pydantic_ai/models/mistral.py

```python
def __init__(
    self,
    model_name: MistralModelName,
    *,
    provider: Literal['mistral'] | Provider[Mistral] = 'mistral',
    json_mode_schema_prompt: str = """Answer in JSON Object, respect the format:\n```\n{schema}\n```\n""",
):
    """Initialize a Mistral model.

    Args:
        model_name: The name of the model to use.
        provider: The provider to use for authentication and API access. Can be either the string
            'mistral' or an instance of `Provider[Mistral]`. If not provided, a new provider will be
            created using the other parameters.
        json_mode_schema_prompt: The prompt to show when the model expects a JSON object as input.
    """
    self._model_name = model_name
    self.json_mode_schema_prompt = json_mode_schema_prompt

    if isinstance(provider, str):
        provider = infer_provider(provider)
    self.client = provider.client
```
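For example, to supply credentials explicitly rather than relying on the environment, a sketch assuming `MistralProvider` from `pydantic_ai.providers.mistral`:

```python
from pydantic_ai.models.mistral import MistralModel
from pydantic_ai.providers.mistral import MistralProvider

# 'your-api-key' is a placeholder; MistralProvider can also wrap a
# pre-configured Mistral client instead of an API key.
model = MistralModel(
    'mistral-large-latest',
    provider=MistralProvider(api_key='your-api-key'),
)
```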
Make a non-streaming request to the model from a Pydantic AI call.
Source code in pydantic_ai_slim/pydantic_ai/models/mistral.py
```python
async def request(
    self,
    messages: list[ModelMessage],
    model_settings: ModelSettings | None,
    model_request_parameters: ModelRequestParameters,
) -> tuple[ModelResponse, Usage]:
    """Make a non-streaming request to the model from a Pydantic AI call."""
    check_allow_model_requests()
    response = await self._completions_create(
        messages, cast(MistralModelSettings, model_settings or {}), model_request_parameters
    )
    return self._process_response(response), _map_usage(response)
```
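Direct use is uncommon (the `Agent` API is the usual entry point), but a minimal sketch of calling `request` by hand, assuming the `ModelRequestParameters` fields shown in this module's source, might look like:

```python
import asyncio

from pydantic_ai.messages import ModelRequest, UserPromptPart
from pydantic_ai.models import ModelRequestParameters
from pydantic_ai.models.mistral import MistralModel


async def main():
    model = MistralModel('mistral-small-latest')
    # No tools and plain-text output, so _get_tool_choice returns None
    # and the request is a simple chat completion.
    response, usage = await model.request(
        [ModelRequest(parts=[UserPromptPart(content='Hello!')])],
        model_settings=None,
        model_request_parameters=ModelRequestParameters(
            function_tools=[], allow_text_output=True, output_tools=[]
        ),
    )
    print(response.parts, usage)


asyncio.run(main())
```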
Make a streaming request to the model from a Pydantic AI call.
Source code in pydantic_ai_slim/pydantic_ai/models/mistral.py
```python
@asynccontextmanager
async def request_stream(
    self,
    messages: list[ModelMessage],
    model_settings: ModelSettings | None,
    model_request_parameters: ModelRequestParameters,
) -> AsyncIterator[StreamedResponse]:
    """Make a streaming request to the model from a Pydantic AI call."""
    check_allow_model_requests()
    response = await self._stream_completions_create(
        messages, cast(MistralModelSettings, model_settings or {}), model_request_parameters
    )
    async with response:
        yield await self._process_streamed_response(model_request_parameters.output_tools, response)
```
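In practice streaming is driven through the agent; a minimal sketch using `Agent.run_stream`:

```python
import asyncio

from pydantic_ai import Agent
from pydantic_ai.models.mistral import MistralModel


async def main():
    agent = Agent(MistralModel('mistral-small-latest'))
    # run_stream enters the streaming context; stream_text() yields the
    # accumulated text as chunks arrive (pass delta=True for raw deltas).
    async with agent.run_stream('Tell me a short joke.') as result:
        async for text in result.stream_text():
            print(text)


asyncio.run(main())
```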
Source code in pydantic_ai_slim/pydantic_ai/models/mistral.py

```python
@dataclass
class MistralStreamedResponse(StreamedResponse):
    """Implementation of `StreamedResponse` for Mistral models."""

    _model_name: MistralModelName
    _response: AsyncIterable[MistralCompletionEvent]
    _timestamp: datetime
    _output_tools: dict[str, ToolDefinition]

    _delta_content: str = field(default='', init=False)

    async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
        chunk: MistralCompletionEvent
        async for chunk in self._response:
            self._usage += _map_usage(chunk.data)

            try:
                choice = chunk.data.choices[0]
            except IndexError:
                continue

            # Handle the text part of the response
            content = choice.delta.content
            text = _map_content(content)
            if text:
                # Attempt to produce an output tool call from the received text
                if self._output_tools:
                    self._delta_content += text
                    maybe_tool_call_part = self._try_get_output_tool_from_text(
                        self._delta_content, self._output_tools
                    )
                    if maybe_tool_call_part:
                        yield self._parts_manager.handle_tool_call_part(
                            vendor_part_id='output',
                            tool_name=maybe_tool_call_part.tool_name,
                            args=maybe_tool_call_part.args_as_dict(),
                            tool_call_id=maybe_tool_call_part.tool_call_id,
                        )
                else:
                    yield self._parts_manager.handle_text_delta(vendor_part_id='content', content=text)

            # Handle the explicit tool calls
            for index, dtc in enumerate(choice.delta.tool_calls or []):
                # It seems that mistral just sends full tool calls, so we just use them directly, rather than building
                yield self._parts_manager.handle_tool_call_part(
                    vendor_part_id=index, tool_name=dtc.function.name, args=dtc.function.arguments, tool_call_id=dtc.id
                )

    @property
    def model_name(self) -> MistralModelName:
        """Get the model name of the response."""
        return self._model_name

    @property
    def timestamp(self) -> datetime:
        """Get the timestamp of the response."""
        return self._timestamp

    @staticmethod
    def _try_get_output_tool_from_text(text: str, output_tools: dict[str, ToolDefinition]) -> ToolCallPart | None:
        output_json: dict[str, Any] | None = pydantic_core.from_json(text, allow_partial='trailing-strings')
        if output_json:
            for output_tool in output_tools.values():
                # NOTE: Additional verification to prevent JSON validation from crashing.
                # Ensures required parameters in the JSON schema are respected, especially for stream-based return types.
                # Example with BaseModel and required fields.
                if not MistralStreamedResponse._validate_required_json_schema(
                    output_json, output_tool.parameters_json_schema
                ):
                    continue

                # The following part_id will be thrown away
                return ToolCallPart(tool_name=output_tool.name, args=output_json)

    @staticmethod
    def _validate_required_json_schema(json_dict: dict[str, Any], json_schema: dict[str, Any]) -> bool:
        """Validate that all required parameters in the JSON schema are present in the JSON dictionary."""
        required_params = json_schema.get('required', [])
        properties = json_schema.get('properties', {})

        for param in required_params:
            if param not in json_dict:
                return False

            param_schema = properties.get(param, {})
            param_type = param_schema.get('type')
            param_items_type = param_schema.get('items', {}).get('type')

            if param_type == 'array' and param_items_type:
                if not isinstance(json_dict[param], list):
                    return False
                for item in json_dict[param]:
                    if not isinstance(item, VALID_JSON_TYPE_MAPPING[param_items_type]):
                        return False
            elif param_type and not isinstance(json_dict[param], VALID_JSON_TYPE_MAPPING[param_type]):
                return False

            if isinstance(json_dict[param], dict) and 'properties' in param_schema:
                nested_schema = param_schema
                if not MistralStreamedResponse._validate_required_json_schema(json_dict[param], nested_schema):
                    return False

        return True
```
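The key trick above is `pydantic_core.from_json` with `allow_partial`, which lets an incomplete JSON document parse as far as it goes, so an output tool call can be surfaced before the stream finishes. A standalone sketch of how the accumulating buffer behaves (assuming this `pydantic_core` partial-parsing behaviour):

```python
import pydantic_core

buffer = ''
for delta in ['{"city": "Par', 'is", "country": ', '"France"}']:
    buffer += delta
    # allow_partial='trailing-strings' tolerates both truncated structures
    # and a string cut off mid-value, returning whatever has parsed so far.
    parsed = pydantic_core.from_json(buffer, allow_partial='trailing-strings')
    print(parsed)

# Expected progression:
# {'city': 'Par'}
# {'city': 'Paris'}
# {'city': 'Paris', 'country': 'France'}
```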