vllm.entrypoints.chat_utils

AudioURL

Bases: TypedDict

Source code in vllm/entrypoints/chat_utils.py
class AudioURL(TypedDict, total=False):
    url: Required[str]
    """
    Either a URL of the audio or a data URL with base64 encoded audio data.
    """

url instance-attribute

url: Required[str]

Either a URL of the audio or a data URL with base64 encoded audio data.
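
A minimal sketch (not from the vLLM source) of the two accepted url forms; the audio bytes below are placeholders:

import base64

from vllm.entrypoints.chat_utils import AudioURL

# Plain URL form
audio_url_remote: AudioURL = {"url": "https://example.com/audio.mp3"}

# Data URL form: base64-encode raw audio bytes (placeholder payload here)
fake_wav_bytes = b"RIFF....WAVEfmt "  # stand-in for real WAV data
audio_url_inline: AudioURL = {
    "url": "data:audio/wav;base64," + base64.b64encode(fake_wav_bytes).decode()
}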

BaseMultiModalItemTracker

Bases: ABC, Generic[_T]

Tracks multi-modal items in a given request and ensures that their number does not exceed the configured maximum per prompt.

Source code in vllm/entrypoints/chat_utils.py
class BaseMultiModalItemTracker(ABC, Generic[_T]):
    """
    Tracks multi-modal items in a given request and ensures that the number
    of multi-modal items in a given request does not exceed the configured
    maximum per prompt.
    """

    def __init__(self, model_config: ModelConfig):
        super().__init__()

        self._model_config = model_config

        self._items_by_modality = defaultdict[str, list[_T]](list)
        # Track original modality for each vision_chunk item (image or video)
        self._modality_order = defaultdict[str, list[str]](list)

    @cached_property
    def use_unified_vision_chunk_modality(self) -> bool:
        """Check if model uses unified vision_chunk modality for images/videos."""
        return getattr(self._model_config.hf_config, "use_unified_vision_chunk", False)

    @property
    def model_config(self) -> ModelConfig:
        return self._model_config

    @cached_property
    def model_cls(self) -> type[SupportsMultiModal]:
        from vllm.model_executor.model_loader import get_model_cls

        model_cls = get_model_cls(self.model_config)
        return cast(type[SupportsMultiModal], model_cls)

    @property
    def allowed_local_media_path(self):
        return self._model_config.allowed_local_media_path

    @property
    def allowed_media_domains(self):
        return self._model_config.allowed_media_domains

    @property
    def mm_registry(self):
        return MULTIMODAL_REGISTRY

    @cached_property
    def mm_processor(self):
        return self.mm_registry.create_processor(self.model_config)

    def add(self, modality: ModalityStr, item: _T) -> str | None:
        """
        Add a multi-modal item to the current prompt and returns the
        placeholder string to use, if any.

        An optional uuid can be added which serves as a unique identifier of the
        media.
        """
        input_modality = modality.replace("_embeds", "")
        original_modality = modality
        use_vision_chunk = (
            self.use_unified_vision_chunk_modality
            and original_modality in ["video", "image"]
        )

        # If use_unified_vision_chunk_modality is enabled,
        # map image/video to vision_chunk
        if use_vision_chunk:
            # Avoid validation failure: models with
            # use_unified_vision_chunk_modality=True only accept
            # the vision_chunk modality.
            input_modality = "vision_chunk"
            num_items = len(self._items_by_modality[input_modality]) + 1
        else:
            num_items = len(self._items_by_modality[original_modality]) + 1

        mm_config = self.model_config.multimodal_config
        if (
            mm_config is not None
            and mm_config.enable_mm_embeds
            and mm_config.get_limit_per_prompt(input_modality) == 0
            and original_modality.endswith("_embeds")
        ):
            # Skip validation: embeddings bypass limit when enable_mm_embeds=True
            pass
        else:
            self.mm_processor.info.validate_num_items(input_modality, num_items)

        # Track original modality for vision_chunk items
        if use_vision_chunk:
            self._items_by_modality[input_modality].append(item)  # type: ignore
            self._modality_order["vision_chunk"].append(original_modality)
        else:
            self._items_by_modality[original_modality].append(item)

        return self.model_cls.get_placeholder_str(modality, num_items)

    @abstractmethod
    def create_parser(self) -> "BaseMultiModalContentParser":
        raise NotImplementedError

use_unified_vision_chunk_modality cached property

use_unified_vision_chunk_modality: bool

Check if model uses unified vision_chunk modality for images/videos.

add

add(modality: ModalityStr, item: _T) -> str | None

Add a multi-modal item to the current prompt and return the placeholder string to use, if any.

An optional uuid can be supplied, serving as a unique identifier for the media.

Source code in vllm/entrypoints/chat_utils.py
def add(self, modality: ModalityStr, item: _T) -> str | None:
    """
    Add a multi-modal item to the current prompt and returns the
    placeholder string to use, if any.

    An optional uuid can be added which serves as a unique identifier of the
    media.
    """
    input_modality = modality.replace("_embeds", "")
    original_modality = modality
    use_vision_chunk = (
        self.use_unified_vision_chunk_modality
        and original_modality in ["video", "image"]
    )

    # If use_unified_vision_chunk_modality is enabled,
    # map image/video to vision_chunk
    if use_vision_chunk:
        # Avoid validation failure: models with
        # use_unified_vision_chunk_modality=True only accept
        # the vision_chunk modality.
        input_modality = "vision_chunk"
        num_items = len(self._items_by_modality[input_modality]) + 1
    else:
        num_items = len(self._items_by_modality[original_modality]) + 1

    mm_config = self.model_config.multimodal_config
    if (
        mm_config is not None
        and mm_config.enable_mm_embeds
        and mm_config.get_limit_per_prompt(input_modality) == 0
        and original_modality.endswith("_embeds")
    ):
        # Skip validation: embeddings bypass limit when enable_mm_embeds=True
        pass
    else:
        self.mm_processor.info.validate_num_items(input_modality, num_items)

    # Track original modality for vision_chunk items
    if use_vision_chunk:
        self._items_by_modality[input_modality].append(item)  # type: ignore
        self._modality_order["vision_chunk"].append(original_modality)
    else:
        self._items_by_modality[original_modality].append(item)

    return self.model_cls.get_placeholder_str(modality, num_items)
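
A minimal, self-contained sketch (not vLLM code) of the counting behaviour implemented above: image and video items share one "vision_chunk" bucket when the model opts in, while other modalities are counted separately. All names here are illustrative.

from collections import defaultdict

items_by_modality: defaultdict[str, list[object]] = defaultdict(list)
modality_order: defaultdict[str, list[str]] = defaultdict(list)
use_unified_vision_chunk = True  # hypothetical model setting

def track(modality: str, item: object) -> int:
    input_modality = modality.replace("_embeds", "")
    if use_unified_vision_chunk and modality in ("image", "video"):
        # Unified bucket; remember the original modality for later.
        input_modality = "vision_chunk"
        modality_order["vision_chunk"].append(modality)
    items_by_modality[input_modality].append(item)
    return len(items_by_modality[input_modality])  # 1-based item count

assert track("image", "img0") == 1  # vision_chunk item, originally an image
assert track("video", "vid0") == 2  # vision_chunk item, originally a video
assert track("audio", "aud0") == 1  # separate "audio" bucket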

ChatCompletionContentPartAudioEmbedsParam

Bases: TypedDict

Source code in vllm/entrypoints/chat_utils.py
class ChatCompletionContentPartAudioEmbedsParam(TypedDict, total=False):
    audio_embeds: str | dict[str, str] | None
    """
    The audio embeddings. It can be either:
    - A single base64 string representing a serialized torch tensor.
    - A dictionary where each value is a base64 string.
    """
    type: Required[Literal["audio_embeds"]]
    """The type of the content part."""
    uuid: str | None
    """
    User-provided UUID of a media item. The user must guarantee that it is
    properly generated and unique across different media items.
    """

audio_embeds instance-attribute

audio_embeds: str | dict[str, str] | None

The audio embeddings. It can be either:

- A single base64 string representing a serialized torch tensor.
- A dictionary where each value is a base64 string.

type instance-attribute

type: Required[Literal['audio_embeds']]

The type of the content part.

uuid instance-attribute

uuid: str | None

User-provided UUID of a media item. The user must guarantee that it is properly generated and unique across different media items.
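
A hedged construction sketch, assuming the embeddings are serialized with torch.save (consistent with "serialized torch tensor" above); the tensor shape and uuid are illustrative:

import base64
import io

import torch

buf = io.BytesIO()
torch.save(torch.zeros(1, 128), buf)  # arbitrary embedding shape

audio_part = {
    "type": "audio_embeds",
    "audio_embeds": base64.b64encode(buf.getvalue()).decode("utf-8"),
    "uuid": "audio-0001",  # optional; must be unique per distinct media item
}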

ChatCompletionContentPartAudioParam

Bases: TypedDict

Source code in vllm/entrypoints/chat_utils.py
class ChatCompletionContentPartAudioParam(TypedDict, total=False):
    audio_url: Required[AudioURL]

    type: Required[Literal["audio_url"]]
    """The type of the content part."""

type instance-attribute

type: Required[Literal['audio_url']]

The type of the content part.
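
An illustrative example of this part inside a chat message (the URL is a placeholder):

message = {
    "role": "user",
    "content": [
        {"type": "text", "text": "Transcribe this clip."},
        {
            "type": "audio_url",
            "audio_url": {"url": "https://example.com/clip.mp3"},
        },
    ],
}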

ChatCompletionContentPartImageEmbedsParam

Bases: TypedDict

Source code in vllm/entrypoints/chat_utils.py
class ChatCompletionContentPartImageEmbedsParam(TypedDict, total=False):
    image_embeds: str | dict[str, str] | None
    """
    The image embeddings. It can be either:
    - A single base64 string.
    - A dictionary where each value is a base64 string.
    """
    type: Required[Literal["image_embeds"]]
    """The type of the content part."""
    uuid: str | None
    """
    User-provided UUID of a media item. The user must guarantee that it is
    properly generated and unique across different media items.
    """

image_embeds instance-attribute

image_embeds: str | dict[str, str] | None

The image embeddings. It can be either:

- A single base64 string.
- A dictionary where each value is a base64 string.

type instance-attribute

type: Required[Literal['image_embeds']]

The type of the content part.

uuid instance-attribute

uuid: str | None

User-provided UUID of a media item. The user must guarantee that it is properly generated and unique across different media items.
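
A sketch of the dictionary form, where each value is a base64 string; the key names below are illustrative, not prescribed by this type:

image_part = {
    "type": "image_embeds",
    "image_embeds": {
        "image_embeds": "<base64 string>",    # illustrative key
        "image_grid_thw": "<base64 string>",  # illustrative key
    },
    "uuid": "image-0001",  # optional
}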

ChatCompletionContentPartVideoParam

Bases: TypedDict

Source code in vllm/entrypoints/chat_utils.py
class ChatCompletionContentPartVideoParam(TypedDict, total=False):
    video_url: Required[VideoURL]

    type: Required[Literal["video_url"]]
    """The type of the content part."""

type instance-attribute

type: Required[Literal['video_url']]

The type of the content part.
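
An illustrative video content part; the URL is a placeholder and could equally be a data URL per VideoURL below:

video_part = {
    "type": "video_url",
    "video_url": {"url": "https://example.com/video.mp4"},
}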

ChatTemplateResolutionError

Bases: ValueError

Raised when chat template resolution fails.

This is a subclass of ValueError for backward compatibility with existing exception handlers.

Source code in vllm/entrypoints/chat_utils.py
class ChatTemplateResolutionError(ValueError):
    """Raised when chat template resolution fails.

    This is a subclass of ValueError for backward compatibility with
    existing exception handlers.
    """

ConversationMessage

Bases: TypedDict

Source code in vllm/entrypoints/chat_utils.py
class ConversationMessage(TypedDict, total=False):
    role: Required[str]
    """The role of the message's author."""

    content: str | None | list[dict[str, str]]
    """The contents of the message"""

    tool_call_id: str | None
    """Tool call that this message is responding to."""

    name: str | None
    """The name of the function to call"""

    tool_calls: Iterable[ChatCompletionMessageToolCallParam] | None
    """The tool calls generated by the model, such as function calls."""

    reasoning: str | None
    """The reasoning content for interleaved thinking."""

    reasoning_content: str | None
    """Deprecated: The reasoning content for interleaved thinking."""

    tools: list[ChatCompletionFunctionToolParam] | None
    """The tools for developer role."""

content instance-attribute

content: str | None | list[dict[str, str]]

The contents of the message.

name instance-attribute

name: str | None

The name of the function to call.

reasoning instance-attribute

reasoning: str | None

The reasoning content for interleaved thinking.

reasoning_content instance-attribute

reasoning_content: str | None

Deprecated: The reasoning content for interleaved thinking.

role instance-attribute

role: Required[str]

The role of the message's author.

tool_call_id instance-attribute

tool_call_id: str | None

Tool call that this message is responding to.

tool_calls instance-attribute

tool_calls: (
    Iterable[ChatCompletionMessageToolCallParam] | None
)

The tool calls generated by the model, such as function calls.

tools instance-attribute

tools: list[ChatCompletionFunctionToolParam] | None

The tools for developer role.
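
An illustrative ConversationMessage recording a tool response (all values are made up):

from vllm.entrypoints.chat_utils import ConversationMessage

tool_msg: ConversationMessage = {
    "role": "tool",
    "content": '{"temperature_c": 21}',
    "tool_call_id": "call_0001",
    "name": "get_weather",
}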

CustomChatCompletionContentPILImageParam

Bases: TypedDict

A simpler version of the param that only accepts a PIL image.

Example: { "image_pil": ImageAsset('cherry_blossom').pil_image }

Source code in vllm/entrypoints/chat_utils.py
class CustomChatCompletionContentPILImageParam(TypedDict, total=False):
    """A simpler version of the param that only accepts a PIL image.

    Example:
    {
        "image_pil": ImageAsset('cherry_blossom').pil_image
    }
    """

    image_pil: PILImage | None
    uuid: str | None
    """
    User-provided UUID of a media item. The user must guarantee that it is
    properly generated and unique across different media items.
    """

uuid instance-attribute

uuid: str | None

User-provided UUID of a media item. The user must guarantee that it is properly generated and unique across different media items.
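
A sketch of passing an in-memory PIL image; per _parse_chat_message_content_mm_part below, the part is recognized by its image_pil key even without a 'type' field:

from PIL import Image

pil_part = {"image_pil": Image.new("RGB", (64, 64), "white")}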

CustomChatCompletionContentSimpleAudioParam

Bases: TypedDict

A simpler version of the param that only accepts a plain audio_url.

Example: { "audio_url": "https://example.com/audio.mp3" }

Source code in vllm/entrypoints/chat_utils.py
class CustomChatCompletionContentSimpleAudioParam(TypedDict, total=False):
    """A simpler version of the param that only accepts a plain audio_url.

    Example:
    {
        "audio_url": "https://example.com/audio.mp3"
    }
    """

    audio_url: str | None

CustomChatCompletionContentSimpleImageParam

Bases: TypedDict

A simpler version of the param that only accepts a plain image_url. This is supported by the OpenAI API, although it is not documented.

Example: { "image_url": "https://example.com/image.jpg" }

Source code in vllm/entrypoints/chat_utils.py
class CustomChatCompletionContentSimpleImageParam(TypedDict, total=False):
    """A simpler version of the param that only accepts a plain image_url.
    This is supported by OpenAI API, although it is not documented.

    Example:
    {
        "image_url": "https://example.com/image.jpg"
    }
    """

    image_url: str | None
    uuid: str | None
    """
    User-provided UUID of a media item. The user must guarantee that it is
    properly generated and unique across different media items.
    """

uuid instance-attribute

uuid: str | None

User-provided UUID of a media item. The user must guarantee that it is properly generated and unique across different media items.

CustomChatCompletionContentSimpleVideoParam

Bases: TypedDict

A simpler version of the param that only accepts a plain video_url.

Example: { "video_url": "https://example.com/video.mp4" }

Source code in vllm/entrypoints/chat_utils.py
class CustomChatCompletionContentSimpleVideoParam(TypedDict, total=False):
    """A simpler version of the param that only accepts a plain audio_url.

    Example:
    {
        "video_url": "https://example.com/video.mp4"
    }
    """

    video_url: str | None
    uuid: str | None
    """
    User-provided UUID of a media item. The user must guarantee that it is
    properly generated and unique across different media items.
    """

uuid instance-attribute

uuid: str | None

User-provided UUID of a media item. The user must guarantee that it is properly generated and unique across different media items.

CustomChatCompletionMessageParam

Bases: TypedDict

Enables custom roles in the Chat Completion API.

Source code in vllm/entrypoints/chat_utils.py
class CustomChatCompletionMessageParam(TypedDict, total=False):
    """Enables custom roles in the Chat Completion API."""

    role: Required[str]
    """The role of the message's author."""

    content: str | list[ChatCompletionContentPartParam]
    """The contents of the message."""

    name: str
    """An optional name for the participant.

    Provides the model information to differentiate between participants of the
    same role.
    """

    tool_call_id: str | None
    """Tool call that this message is responding to."""

    tool_calls: Iterable[ChatCompletionMessageToolCallParam] | None
    """The tool calls generated by the model, such as function calls."""

    reasoning: str | None
    """The reasoning content for interleaved thinking."""

    tools: list[ChatCompletionFunctionToolParam] | None
    """The tools for developer role."""

content instance-attribute

content: str | list[ChatCompletionContentPartParam]

The contents of the message.

name instance-attribute

name: str

An optional name for the participant.

Provides the model information to differentiate between participants of the same role.

reasoning instance-attribute

reasoning: str | None

The reasoning content for interleaved thinking.

role instance-attribute

role: Required[str]

The role of the message's author.

tool_call_id instance-attribute

tool_call_id: str | None

Tool call that this message is responding to.

tool_calls instance-attribute

tool_calls: (
    Iterable[ChatCompletionMessageToolCallParam] | None
)

The tool calls generated by the model, such as function calls.

tools instance-attribute

tools: list[ChatCompletionFunctionToolParam] | None

The tools for developer role.
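
An illustrative message using a custom role, which this type permits:

critique_msg = {
    "role": "reviewer",  # non-standard role enabled by this type
    "name": "reviewer-1",
    "content": "The answer omits the error-handling case.",
}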

CustomThinkCompletionContentParam

Bases: TypedDict

A Think Completion Content Param that accepts a plain text and a boolean.

Example: { "thinking": "I am thinking about the answer", "closed": True, "type": "thinking" }

Source code in vllm/entrypoints/chat_utils.py
class CustomThinkCompletionContentParam(TypedDict, total=False):
    """A Think Completion Content Param that accepts a plain text and a boolean.

    Example:
    {
        "thinking": "I am thinking about the answer",
        "closed": True,
        "type": "thinking"
    }
    """

    thinking: Required[str]
    """The thinking content."""

    closed: bool
    """Whether the thinking is closed."""

    type: Required[Literal["thinking"]]
    """The thinking type."""

closed instance-attribute

closed: bool

Whether the thinking is closed.

thinking instance-attribute

thinking: Required[str]

The thinking content.

type instance-attribute

type: Required[Literal['thinking']]

The thinking type.

PILImage

Bases: BaseModel

A PIL.Image.Image object.

Source code in vllm/entrypoints/chat_utils.py
class PILImage(BaseModel):
    """
    A PIL.Image.Image object.
    """

    image_pil: Image.Image
    model_config = ConfigDict(arbitrary_types_allowed=True)
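
Because arbitrary_types_allowed=True, a raw PIL image validates directly; a short sketch:

from PIL import Image

from vllm.entrypoints.chat_utils import PILImage

wrapped = PILImage(image_pil=Image.new("RGB", (32, 32)))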

VideoURL

Bases: TypedDict

Source code in vllm/entrypoints/chat_utils.py
class VideoURL(TypedDict, total=False):
    url: Required[str]
    """
    Either a URL of the video or a data URL with base64 encoded video data.
    """

url instance-attribute

url: Required[str]

Either a URL of the video or a data URL with base64 encoded video data.

_get_full_multimodal_text_prompt

_get_full_multimodal_text_prompt(
    placeholder_storage: dict[str, list],
    texts: list[str],
    interleave_strings: bool,
) -> str

Combine multimodal prompts for a multimodal language model.

Source code in vllm/entrypoints/chat_utils.py
def _get_full_multimodal_text_prompt(
    placeholder_storage: dict[str, list],
    texts: list[str],
    interleave_strings: bool,
) -> str:
    """Combine multimodal prompts for a multimodal language model."""

    # flatten storage so that it looks like
    # {
    #   "<|image|>": 2,
    #   "<|audio|>": 1
    # }
    placeholder_counts = Counter(
        [v for elem in placeholder_storage.values() for v in elem]
    )

    if interleave_strings:
        text_prompt = _get_interleaved_text_prompt(placeholder_storage, texts)
    else:
        text_prompt = "\n".join(texts)

    # Pass the interleaved text further in case the user manually inserted
    # image placeholders but forgot to disable the 'interleave_strings' flag

    # Look through the text prompt to check for missing placeholders
    missing_placeholders: list[str] = []
    for placeholder in placeholder_counts:
        # For any existing placeholder in the text prompt, we leave it as is
        placeholder_counts[placeholder] -= text_prompt.count(placeholder)

        if placeholder_counts[placeholder] < 0:
            logger.error(
                "Placeholder count is negative! "
                "Ensure that the 'interleave_strings' flag is disabled "
                "(current value: %s) "
                "when manually placing image placeholders.",
                interleave_strings,
            )
            logger.debug("Input prompt: %s", text_prompt)
            raise ValueError(
                f"Found more '{placeholder}' placeholders in input prompt than "
                "actual multimodal data items."
            )

        missing_placeholders.extend([placeholder] * placeholder_counts[placeholder])

    # NOTE: Default behaviour: we always add missing placeholders
    # at the front of the prompt, if interleave_strings=False
    if text_prompt:
        return "\n".join(missing_placeholders + [text_prompt])
    else:
        return "\n".join(missing_placeholders)

_parse_chat_message_content_mm_part

_parse_chat_message_content_mm_part(
    part: ChatCompletionContentPartParam,
) -> tuple[str, _ContentPart]

Parses a given multi-modal content part based on its type.

Parameters:

part (ChatCompletionContentPartParam), required
    A dict containing the content part, with a potential 'type' field.

Returns:

tuple[str, _ContentPart]
    A tuple (part_type, content) where:
    - part_type: Type of the part (e.g., 'text', 'image_url').
    - content: Parsed content (e.g., text, image URL).

Raises:

ValueError
    If the 'type' field is missing and no direct URL is found.

Source code in vllm/entrypoints/chat_utils.py
def _parse_chat_message_content_mm_part(
    part: ChatCompletionContentPartParam,
) -> tuple[str, _ContentPart]:
    """
    Parses a given multi-modal content part based on its type.

    Args:
        part: A dict containing the content part, with a potential 'type' field.

    Returns:
        A tuple (part_type, content) where:
        - part_type: Type of the part (e.g., 'text', 'image_url').
        - content: Parsed content (e.g., text, image URL).

    Raises:
        ValueError: If the 'type' field is missing and no direct URL is found.
    """
    assert isinstance(
        part, dict
    )  # This is needed to avoid mypy errors: part.get() from str
    part_type = part.get("type", None)
    uuid = part.get("uuid", None)

    if isinstance(part_type, str) and part_type in MM_PARSER_MAP and uuid is None:  # noqa: E501
        content = MM_PARSER_MAP[part_type](part)

        # Special case for 'image_url.detail'
        # We only support 'auto', which is the default
        if part_type == "image_url" and part.get("detail", "auto") != "auto":
            logger.warning(
                "'image_url.detail' is currently not supported and will be ignored."
            )

        return part_type, content

    # Handle missing 'type' but provided direct URL fields.
    # 'type' is required field by pydantic
    if part_type is None or uuid is not None:
        if "image_url" in part:
            image_params = cast(CustomChatCompletionContentSimpleImageParam, part)
            image_url = image_params.get("image_url", None)
            if isinstance(image_url, dict):
                # Can potentially happen if user provides a uuid
                # with url as a dict of {"url": url}
                image_url = image_url.get("url", None)
            return "image_url", image_url
        if "image_pil" in part:
            # "image_pil" could be None if UUID is provided.
            image_params = cast(  # type: ignore
                CustomChatCompletionContentPILImageParam, part
            )
            image_pil = image_params.get("image_pil", None)
            return "image_pil", image_pil
        if "image_embeds" in part:
            # "image_embeds" could be None if UUID is provided.
            image_params = cast(  # type: ignore
                ChatCompletionContentPartImageEmbedsParam, part
            )
            image_embeds = image_params.get("image_embeds", None)
            return "image_embeds", image_embeds
        if "audio_embeds" in part:
            # "audio_embeds" could be None if UUID is provided.
            audio_params = cast(  # type: ignore[assignment]
                ChatCompletionContentPartAudioEmbedsParam, part
            )
            audio_embeds = audio_params.get("audio_embeds", None)
            return "audio_embeds", audio_embeds
        if "audio_url" in part:
            audio_params = cast(  # type: ignore[assignment]
                CustomChatCompletionContentSimpleAudioParam, part
            )
            audio_url = audio_params.get("audio_url", None)
            if isinstance(audio_url, dict):
                # Can potentially happen if user provides a uuid
                # with url as a dict of {"url": url}
                audio_url = audio_url.get("url", None)
            return "audio_url", audio_url
        if part.get("input_audio") is not None:
            input_audio_params = cast(dict[str, str], part)
            return "input_audio", input_audio_params
        if "video_url" in part:
            video_params = cast(CustomChatCompletionContentSimpleVideoParam, part)
            video_url = video_params.get("video_url", None)
            if isinstance(video_url, dict):
                # Can potentially happen if user provides a uuid
                # with url as a dict of {"url": url}
                video_url = video_url.get("url", None)
            return "video_url", video_url
        # Raise an error if no 'type' or direct URL is found.
        raise ValueError("Missing 'type' field in multimodal part.")

    if not isinstance(part_type, str):
        raise ValueError("Invalid 'type' field in multimodal part.")
    return part_type, "unknown part_type content"
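
A usage sketch of the bare-URL path traced above: with no 'type' field, the part type is inferred from the field name.

from vllm.entrypoints.chat_utils import _parse_chat_message_content_mm_part

part_type, content = _parse_chat_message_content_mm_part(
    {"image_url": "https://example.com/cat.jpg"}
)
assert (part_type, content) == ("image_url", "https://example.com/cat.jpg")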

_parse_chat_message_content_part

_parse_chat_message_content_part(
    part: ChatCompletionContentPartParam,
    mm_parser: BaseMultiModalContentParser,
    *,
    wrap_dicts: bool,
    interleave_strings: bool,
) -> _ContentPart | None

Parses a single part of a conversation. If wrap_dicts is True, structured dictionary pieces for texts and images will be wrapped in dictionaries, i.e., {"type": "text", "text": ...} and {"type": "image"}, respectively. Otherwise multimodal data will be handled by mm_parser, and texts will be returned as strings to be joined with multimodal placeholders.

Source code in vllm/entrypoints/chat_utils.py
def _parse_chat_message_content_part(
    part: ChatCompletionContentPartParam,
    mm_parser: BaseMultiModalContentParser,
    *,
    wrap_dicts: bool,
    interleave_strings: bool,
) -> _ContentPart | None:
    """Parses a single part of a conversation. If wrap_dicts is True,
    structured dictionary pieces for texts and images will be
    wrapped in dictionaries, i.e., {"type": "text", "text", ...} and
    {"type": "image"}, respectively. Otherwise multimodal data will be
    handled by mm_parser, and texts will be returned as strings to be joined
    with multimodal placeholders.
    """
    if isinstance(part, str):  # Handle plain text parts
        return part
    # Handle structured dictionary parts
    part_type, content = _parse_chat_message_content_mm_part(part)
    # if part_type is text/refusal/image_url/audio_url/video_url/input_audio but
    # content is None, log a warning and skip
    if part_type in PART_TYPES_TO_SKIP_NONE_CONTENT and content is None:
        logger.warning(
            "Skipping multimodal part '%s' (type: '%s') "
            "with empty / unparsable content.",
            part,
            part_type,
        )
        return None

    if part_type in ("text", "input_text", "output_text", "refusal", "thinking"):
        str_content = cast(str, content)
        if wrap_dicts:
            return {"type": "text", "text": str_content}
        else:
            return str_content

    # For media items, if a user has provided one, use it. Otherwise, insert
    # a placeholder empty uuid.
    uuid = part.get("uuid", None)
    if uuid is not None:
        uuid = str(uuid)

    modality = None
    if part_type == "image_pil":
        image_content = cast(Image.Image, content) if content is not None else None
        mm_parser.parse_image_pil(image_content, uuid)
        modality = "image"
    elif part_type in ("image_url", "input_image"):
        str_content = cast(str, content)
        mm_parser.parse_image(str_content, uuid)
        modality = "image"
    elif part_type == "image_embeds":
        content = cast(str | dict[str, str], content) if content is not None else None
        mm_parser.parse_image_embeds(content, uuid)
        modality = "image"
    elif part_type == "audio_embeds":
        content = cast(str | dict[str, str], content) if content is not None else None
        mm_parser.parse_audio_embeds(content, uuid)
        modality = "audio"
    elif part_type == "audio_url":
        str_content = cast(str, content)
        mm_parser.parse_audio(str_content, uuid)
        modality = "audio"
    elif part_type == "input_audio":
        dict_content = cast(InputAudio, content)
        mm_parser.parse_input_audio(dict_content, uuid)
        modality = "audio"
    elif part_type == "video_url":
        str_content = cast(str, content)
        mm_parser.parse_video(str_content, uuid)
        modality = "video"
    else:
        raise NotImplementedError(f"Unknown part type: {part_type}")

    return (
        {"type": modality}
        if wrap_dicts
        else (MODALITY_PLACEHOLDERS_MAP[modality] if interleave_strings else None)
    )

validate_chat_template

validate_chat_template(chat_template: Path | str | None)

Raises an error if the provided chat template appears invalid.

Source code in vllm/entrypoints/chat_utils.py
def validate_chat_template(chat_template: Path | str | None):
    """Raises if the provided chat template appears invalid."""
    if chat_template is None:
        return

    elif isinstance(chat_template, Path) and not chat_template.exists():
        raise FileNotFoundError("the supplied chat template path doesn't exist")

    elif isinstance(chat_template, str):
        JINJA_CHARS = "{}\n"
        if (
            not any(c in chat_template for c in JINJA_CHARS)
            and not Path(chat_template).exists()
        ):
            # Try to find the template in the built-in templates directory
            from vllm.transformers_utils.chat_templates.registry import (
                CHAT_TEMPLATES_DIR,
            )

            builtin_template_path = CHAT_TEMPLATES_DIR / chat_template
            if not builtin_template_path.exists():
                raise ValueError(
                    f"The supplied chat template string ({chat_template}) "
                    f"appears path-like, but doesn't exist! "
                    f"Tried: {chat_template} and {builtin_template_path}"
                )

    else:
        raise TypeError(f"{type(chat_template)} is not a valid chat template type")
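
A short usage sketch exercising the branches above (paths are illustrative):

from pathlib import Path

from vllm.entrypoints.chat_utils import validate_chat_template

validate_chat_template(None)  # no template: accepted as-is
validate_chat_template("{% for m in messages %}{{ m.content }}{% endfor %}")

try:
    validate_chat_template(Path("/nonexistent/template.jinja"))
except FileNotFoundError as err:
    print(err)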