vllm.reasoning.olmo3_reasoning_parser

Olmo3ReasoningParser

Bases: ReasoningParser

Reasoning parser for the Olmo 3 model family

Olmo3ReasoningParser

This class implements a reasoning parser specifically designed for the Olmo 3 family of models. Olmo 3 models do not use special tokens to indicate reasoning; rather, the reasoning trace is wrapped in <think> and </think>, which are tokenized using standard vocabulary entries. Because of this, the parser operates in string space, accumulating characters in a buffer until it sees a <think> or </think> tag to switch modes.

Key Features
  • For non-streaming output, it recognizes and extracts reasoning (text bracketed by <think> and </think>) and content (everything after the first </think>).
  • For streaming, it uses a buffer to accumulate delta text and emits progressive delta messages as soon as thinking starts or ends.
  • For reliability, some Olmo 3 models may hardcode the first <think> tag in the input text (similar to DeepSeek R1, or reasoning-only Qwen models). To support such variants, the parser also works when the first <think> tag is missing from the generation; a minimal sketch of this matching behavior follows the list.
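
As a minimal, self-contained sketch of the non-streaming matching behavior (the standalone regex only, mirroring the pattern built in __init__; the example strings are hypothetical):

import re

think_start, think_end = r"<think>", r"</think>"
reasoning_regex = re.compile(
    rf"^(?:{think_start})?(?P<reasoning>.*?){think_end}(?P<content>.*)$",
    re.DOTALL,
)

# Leading <think> emitted by the model:
m = reasoning_regex.match("<think>plan the answer</think>Final answer.")
print(m.group("reasoning"), "|", m.group("content"))
# -> plan the answer | Final answer.

# Leading <think> hardcoded in the prompt, so absent from the generation:
m = reasoning_regex.match("plan the answer</think>Final answer.")
print(m.group("reasoning"), "|", m.group("content"))
# -> plan the answer | Final answer.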
Source code in vllm/reasoning/olmo3_reasoning_parser.py
class Olmo3ReasoningParser(ReasoningParser):
    """
    Reasoning parser for the Olmo 3 model family

    Olmo3ReasoningParser

    This class implements a reasoning parser specifically designed for the
    Olmo 3 family of models. Olmo 3 models do not use special tokens to
    indicate reasoning; rather, the reasoning trace is wrapped in `<think>`
    and `</think>`, which are tokenized using standard vocabulary entries.
    Because of this, the parser operates in string space, accumulating
    characters in a buffer until it sees a `<think>` or `</think>` tag to
    switch modes.

    Key Features:
        - For non-streaming output, recognizes and extracts reasoning (text
          bracketed by `<think>` and `</think>`) and content (everything
          after the first `</think>`).
        - For streaming, uses a buffer to accumulate delta text and emits
          progressive delta messages as soon as thinking starts or ends.
        - For reliability, some Olmo 3 models may hardcode the first
          `<think>` tag in the input text (similar to DeepSeek R1, or
          reasoning-only Qwen models). To support such variants, the
          parser also works when the first `<think>` tag is missing from
          the generation.
    """

    def __init__(self, tokenizer: "TokenizerLike", *args, **kwargs):
        super().__init__(tokenizer, *args, **kwargs)

        self.think_start = r"<think>"
        self.think_end = r"</think>"

        # note that the first <think> is optional; this allows the parser
        # to work when the chat template hardcodes a <think> at the
        # beginning of the generation.
        reasoning_expr = (
            rf"^(?:{self.think_start})?(?P<reasoning>.*?)"
            rf"{self.think_end}(?P<content>.*)$"
        )
        self.reasoning_regex = re.compile(reasoning_expr, re.DOTALL)

        self.buffer = Olmo3ReasoningBuffer(
            think_start=self.think_start, think_end=self.think_end
        )

    def is_reasoning_end(self, input_ids: Sequence[int]) -> bool:
        text = self.model_tokenizer.decode(input_ids)
        return self.think_end in text

    def extract_content_ids(self, input_ids: list[int]) -> list[int]:
        # For Olmo 3 streaming reasoning parsing, the streaming parser runs
        # first, so the same token is then seen by both is_reasoning_end
        # and extract_content_ids. That token is not part of the content,
        # so just return [] here.
        return []

    def extract_reasoning(
        self,
        model_output: str,
        request: ChatCompletionRequest | ResponsesRequest,
    ) -> tuple[str | None, str | None]:
        """Extract the reasoning content & content sections, respectively.
        If the sequence doesn't match what we expect, i.e., the model generates
        something else, all content is considered non-reasoning content.

        Args:
            model_output (str): Output of the model to be parsed.
            request (ChatCompletionRequest | ResponsesRequest): Request being
                processed.

        Returns:
            tuple[Optional[str], Optional[str]]: Tuple pair containing the
            reasoning content and non-reasoning content.
        """

        re_match = self.reasoning_regex.match(model_output)
        if re_match:
            reasoning = re_match.group("reasoning") or None
            content = re_match.group("content") or None
            return reasoning, content

        # no reasoning content
        return None, model_output

    def extract_reasoning_streaming(
        self,
        previous_text: str,
        current_text: str,
        delta_text: str,
        previous_token_ids: Sequence[int],
        current_token_ids: Sequence[int],
        delta_token_ids: Sequence[int],
    ) -> DeltaMessage | None:
        """Extract content using token ID sequence state machine"""

        delta_message = self.buffer.add_text(delta_text)
        if delta_message is None and self.buffer.think_end in self.buffer.buffer:
            # This is a bit hacky: because of how the buffer is
            # constructed, if the last delta_text contains the characters
            # that mark the end of the thinking section, the messages in
            # the buffer would never be processed, since we get no further
            # turn. To get around that, we check whether the text buffer
            # contains the end-of-thinking tag and, if so, reprocess the
            # buffer.
            delta_message = self.buffer.process_buffer()

        return delta_message

extract_reasoning

extract_reasoning(
    model_output: str,
    request: ChatCompletionRequest | ResponsesRequest,
) -> tuple[str | None, str | None]

Extract the reasoning content & content sections, respectively. If the sequence doesn't match what we expect, i.e., the model generates something else, all content is considered non-reasoning content.

Parameters:
  • model_output (str): Output of the model to be parsed. Required.
  • request (ChatCompletionRequest | ResponsesRequest): Request being processed. Required.

Returns:
  • tuple[str | None, str | None]: Tuple pair containing the reasoning content and the non-reasoning content.

Source code in vllm/reasoning/olmo3_reasoning_parser.py
def extract_reasoning(
    self,
    model_output: str,
    request: ChatCompletionRequest | ResponsesRequest,
) -> tuple[str | None, str | None]:
    """Extract the reasoning content & content sections, respectively.
    If the sequence doesn't match what we expect, i.e., the model generates
    something else, all content is considered non-reasoning content.

    Args:
        model_output (str): Output of the model to be parsed.
        request (ChatCompletionRequest | ResponsesRequest): Request being
            processed.

    Returns:
        tuple[Optional[str], Optional[str]]: Tuple pair containing the
        reasoning content and non-reasoning content.
    """

    re_match = self.reasoning_regex.match(model_output)
    if re_match:
        reasoning = re_match.group("reasoning") or None
        content = re_match.group("content") or None
        return reasoning, content

    # no reasoning content
    return None, model_output
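
A hypothetical invocation, assuming parser is an Olmo3ReasoningParser constructed with the model's tokenizer and request is the in-flight request object (both names are placeholders):

reasoning, content = parser.extract_reasoning(
    "<think>Check the units first.</think>It is 42 kg.", request
)
assert reasoning == "Check the units first."
assert content == "It is 42 kg."

# Without a closing </think>, everything is non-reasoning content:
reasoning, content = parser.extract_reasoning("Just an answer.", request)
assert reasoning is None and content == "Just an answer."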

extract_reasoning_streaming

extract_reasoning_streaming(
    previous_text: str,
    current_text: str,
    delta_text: str,
    previous_token_ids: Sequence[int],
    current_token_ids: Sequence[int],
    delta_token_ids: Sequence[int],
) -> DeltaMessage | None

Extract reasoning/content deltas using a string-buffer state machine

Source code in vllm/reasoning/olmo3_reasoning_parser.py
def extract_reasoning_streaming(
    self,
    previous_text: str,
    current_text: str,
    delta_text: str,
    previous_token_ids: Sequence[int],
    current_token_ids: Sequence[int],
    delta_token_ids: Sequence[int],
) -> DeltaMessage | None:
    """Extract content using token ID sequence state machine"""

    delta_message = self.buffer.add_text(delta_text)
    if delta_message is None and self.buffer.think_end in self.buffer.buffer:
        # This is a bit hacky: because of how the buffer is
        # constructed, if the last delta_text contains the characters
        # that mark the end of the thinking section, the messages in
        # the buffer would never be processed, since we get no further
        # turn. To get around that, we check whether the text buffer
        # contains the end-of-thinking tag and, if so, reprocess the
        # buffer.
        delta_message = self.buffer.process_buffer()

    return delta_message
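
A minimal driving loop for the streaming path, with hypothetical deltas; parser is again a placeholder for a constructed Olmo3ReasoningParser. Note that the token-ID arguments are ignored by this parser, since it operates in string space:

deltas = ["<th", "ink>step 1,", " step 2</think>", "The answer."]
previous = ""
for delta in deltas:
    current = previous + delta
    msg = parser.extract_reasoning_streaming(
        previous_text=previous,
        current_text=current,
        delta_text=delta,
        previous_token_ids=[],
        current_token_ids=[],
        delta_token_ids=[],
    )
    if msg is not None:
        # msg.reasoning_content carries text inside <think>...</think>;
        # msg.content carries text after </think>.
        print(msg.reasoning_content, msg.content)
    previous = current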

string_overlap

string_overlap(
    a: str, b: str
) -> tuple[Indices | None, Indices | None]

Find the longest overlap where the end of string a matches the start of string b.

Parameters:
  • a (str): First string. Required.
  • b (str): Second string. Required.

Returns:
  • tuple[Indices | None, Indices | None]: Tuple of Indices spans marking the overlapping portion in each string, or (None, None) if no overlap exists.

Source code in vllm/reasoning/olmo3_reasoning_parser.py
def string_overlap(a: str, b: str) -> tuple[Indices | None, Indices | None]:
    """
    Find the longest overlap where the end of string a matches the start
    of string b.

    Args:
        a: First string
        b: Second string

    Returns:
        Tuple of Indices tuples representing the overlapping portions in each
        string, or a tuple of None if no overlap exists
    """

    # swap so a is always the shorter string
    a, b, swap = (a, b, False) if len(a) < len(b) else (b, a, True)

    # first check: is a fully contained in b?
    if a in b:
        ind_a = Indices(0, len(a))
        ind_b = Indices(b.index(a), b.index(a) + len(a))
        return (ind_b, ind_a) if swap else (ind_a, ind_b)

    # second check: does the end of a overlap with the
    #               beginning of b?
    for i in range(len(a) - 1, 0, -1):
        if a[-i:] == b[:i]:
            ind_a = Indices(len(a) - i, len(a))
            ind_b = Indices(0, i)
            return (ind_b, ind_a) if swap else (ind_a, ind_b)

    # third check: does the beginning of a overlap with
    #              the end of b?
    for i in range(len(a) - 1, 0, -1):
        if b[-i:] == a[:i]:
            ind_a = Indices(0, i)
            ind_b = Indices(len(b) - i, len(b))
            return (ind_b, ind_a) if swap else (ind_a, ind_b)

    return None, None
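
A worked example, assuming (as the source's usage suggests) that Indices is a (start, end) pair supporting tuple indexing. This kind of overlap check is how the streaming buffer can detect a partial closing tag held back at the end of accumulated text:

a, b = "some text</th", "</think>"
ind_a, ind_b = string_overlap(a, b)
print(ind_a, ind_b)  # expected: spans (9, 13) in a and (0, 4) in b
assert a[ind_a[0]:ind_a[1]] == b[ind_b[0]:ind_b[1]] == "</th"

# No shared boundary at all:
print(string_overlap("abc", "xyz"))  # (None, None)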