Run State

RunState class for serializing and resuming agent runs with human-in-the-loop support.
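
A minimal end-to-end sketch of the save/approve/resume cycle this class supports. Only the RunState methods documented on this page (get_interruptions, approve, to_string, from_string) come from the source below; everything else (Agent and Runner as top-level agents exports, obtaining the paused state from the run result via a hypothetical result.to_state(), and Runner.run accepting a RunState to resume) is an assumption about the surrounding SDK, not a documented API.

import asyncio

from agents import Agent, Runner  # assumed top-level exports
from agents.run_state import RunState


async def main() -> None:
    agent = Agent(name="Assistant")  # tools that require approval omitted for brevity

    result = await Runner.run(agent, "Delete the staging database")

    state = result.to_state()  # hypothetical accessor for the paused run state
    if state.get_interruptions():
        saved = state.to_string()  # persist while waiting for a human decision

        # Later, possibly in another process: restore, approve, and resume.
        restored = await RunState.from_string(agent, saved)
        for approval in restored.get_interruptions():
            restored.approve(approval)
        result = await Runner.run(agent, restored)  # assumed: Runner accepts a RunState

    print(result.final_output)


asyncio.run(main())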

RunState dataclass

Bases: Generic[TContext, TAgent]

Serializable snapshot of an agent run, including context, usage, and interruptions.

Source code in src/agents/run_state.py
@dataclass
class RunState(Generic[TContext, TAgent]):
    """Serializable snapshot of an agent run, including context, usage, and interruptions."""

    _current_turn: int = 0
    """Current turn number in the conversation."""

    _current_agent: TAgent | None = None
    """The agent currently handling the conversation."""

    _original_input: str | list[Any] = field(default_factory=list)
    """Original user input prior to any processing."""

    _model_responses: list[ModelResponse] = field(default_factory=list)
    """Responses from the model so far."""

    _context: RunContextWrapper[TContext] | None = None
    """Run context tracking approvals, usage, and other metadata."""

    _generated_items: list[RunItem] = field(default_factory=list)
    """Items used to build model input when resuming; may be filtered by handoffs."""

    _session_items: list[RunItem] = field(default_factory=list)
    """Full, unfiltered run items for session history."""

    _max_turns: int = 10
    """Maximum allowed turns before forcing termination."""

    _conversation_id: str | None = None
    """Conversation identifier for server-managed conversation tracking."""

    _previous_response_id: str | None = None
    """Response identifier of the last server-managed response."""

    _auto_previous_response_id: bool = False
    """Whether the previous response id should be automatically tracked."""

    _input_guardrail_results: list[InputGuardrailResult] = field(default_factory=list)
    """Results from input guardrails applied to the run."""

    _output_guardrail_results: list[OutputGuardrailResult] = field(default_factory=list)
    """Results from output guardrails applied to the run."""

    _tool_input_guardrail_results: list[ToolInputGuardrailResult] = field(default_factory=list)
    """Results from tool input guardrails applied during the run."""

    _tool_output_guardrail_results: list[ToolOutputGuardrailResult] = field(default_factory=list)
    """Results from tool output guardrails applied during the run."""

    _current_step: NextStepInterruption | None = None
    """Current step if the run is interrupted (e.g., for tool approval)."""

    _last_processed_response: ProcessedResponse | None = None
    """The last processed model response. This is needed for resuming from interruptions."""

    _current_turn_persisted_item_count: int = 0
    """Tracks how many items from this turn were already written to the session."""

    _tool_use_tracker_snapshot: dict[str, list[str]] = field(default_factory=dict)
    """Serialized snapshot of the AgentToolUseTracker (agent name -> tools used)."""

    _trace_state: TraceState | None = field(default=None, repr=False)
    """Serialized trace metadata for resuming tracing context."""

    def __init__(
        self,
        context: RunContextWrapper[TContext],
        original_input: str | list[Any],
        starting_agent: TAgent,
        max_turns: int = 10,
        *,
        conversation_id: str | None = None,
        previous_response_id: str | None = None,
        auto_previous_response_id: bool = False,
    ):
        """Initialize a new RunState."""
        self._context = context
        self._original_input = _clone_original_input(original_input)
        self._current_agent = starting_agent
        self._max_turns = max_turns
        self._conversation_id = conversation_id
        self._previous_response_id = previous_response_id
        self._auto_previous_response_id = auto_previous_response_id
        self._model_responses = []
        self._generated_items = []
        self._session_items = []
        self._input_guardrail_results = []
        self._output_guardrail_results = []
        self._tool_input_guardrail_results = []
        self._tool_output_guardrail_results = []
        self._current_step = None
        self._current_turn = 0
        self._last_processed_response = None
        self._current_turn_persisted_item_count = 0
        self._tool_use_tracker_snapshot = {}
        self._trace_state = None

    def get_interruptions(self) -> list[ToolApprovalItem]:
        """Return pending interruptions if the current step is an interruption."""
        # Import at runtime to avoid circular import
        from .run_internal.run_steps import NextStepInterruption

        if self._current_step is None or not isinstance(self._current_step, NextStepInterruption):
            return []
        return self._current_step.interruptions

    def approve(self, approval_item: ToolApprovalItem, always_approve: bool = False) -> None:
        """Approve a tool call and rerun with this state to continue."""
        if self._context is None:
            raise UserError("Cannot approve tool: RunState has no context")
        self._context.approve_tool(approval_item, always_approve=always_approve)

    def reject(self, approval_item: ToolApprovalItem, always_reject: bool = False) -> None:
        """Reject a tool call and rerun with this state to continue."""
        if self._context is None:
            raise UserError("Cannot reject tool: RunState has no context")
        self._context.reject_tool(approval_item, always_reject=always_reject)

    def _serialize_approvals(self) -> dict[str, dict[str, Any]]:
        """Serialize approval records into a JSON-friendly mapping."""
        if self._context is None:
            return {}
        approvals_dict: dict[str, dict[str, Any]] = {}
        for tool_name, record in self._context._approvals.items():
            approvals_dict[tool_name] = {
                "approved": record.approved
                if isinstance(record.approved, bool)
                else list(record.approved),
                "rejected": record.rejected
                if isinstance(record.rejected, bool)
                else list(record.rejected),
            }
        return approvals_dict

    def _serialize_model_responses(self) -> list[dict[str, Any]]:
        """Serialize model responses."""
        return [
            {
                "usage": serialize_usage(resp.usage),
                "output": [_serialize_raw_item_value(item) for item in resp.output],
                "response_id": resp.response_id,
            }
            for resp in self._model_responses
        ]

    def _serialize_original_input(self) -> str | list[Any]:
        """Normalize original input into the shape expected by Responses API."""
        if not isinstance(self._original_input, list):
            return self._original_input

        normalized_items = []
        for item in self._original_input:
            normalized_item = _serialize_raw_item_value(item)
            if isinstance(normalized_item, dict):
                normalized_item = dict(normalized_item)
                role = normalized_item.get("role")
                if role == "assistant":
                    content = normalized_item.get("content")
                    if isinstance(content, str):
                        normalized_item["content"] = [{"type": "output_text", "text": content}]
                    if "status" not in normalized_item:
                        normalized_item["status"] = "completed"
            normalized_items.append(normalized_item)
        return normalized_items

    def _serialize_context_payload(
        self,
        *,
        context_serializer: ContextSerializer | None = None,
        strict_context: bool = False,
    ) -> tuple[dict[str, Any] | None, dict[str, Any]]:
        """Validate and serialize the stored run context."""
        if self._context is None:
            return None, _build_context_meta(
                None,
                serialized_via="none",
                requires_deserializer=False,
                omitted=False,
            )

        raw_context_payload = self._context.context
        if raw_context_payload is None:
            return None, _build_context_meta(
                raw_context_payload,
                serialized_via="none",
                requires_deserializer=False,
                omitted=False,
            )

        if isinstance(raw_context_payload, Mapping):
            return (
                dict(raw_context_payload),
                _build_context_meta(
                    raw_context_payload,
                    serialized_via="mapping",
                    requires_deserializer=False,
                    omitted=False,
                ),
            )

        if strict_context and context_serializer is None:
            # Avoid silently dropping non-mapping context data when strict mode is requested.
            raise UserError(
                "RunState serialization requires context to be a mapping when strict_context "
                "is True. Provide context_serializer to serialize custom contexts."
            )

        if context_serializer is not None:
            try:
                serialized = context_serializer(raw_context_payload)
            except Exception as exc:
                raise UserError(
                    "Context serializer failed while serializing RunState context."
                ) from exc
            if not isinstance(serialized, Mapping):
                raise UserError("Context serializer must return a mapping.")
            return (
                dict(serialized),
                _build_context_meta(
                    raw_context_payload,
                    serialized_via="context_serializer",
                    requires_deserializer=True,
                    omitted=False,
                ),
            )

        if hasattr(raw_context_payload, "model_dump"):
            try:
                serialized = raw_context_payload.model_dump(exclude_unset=True)
            except TypeError:
                serialized = raw_context_payload.model_dump()
            if not isinstance(serialized, Mapping):
                raise UserError("RunState context model_dump must return a mapping.")
            # We can persist the data, but the original type is lost unless the caller rebuilds it.
            logger.warning(
                "RunState context was serialized from a Pydantic model. "
                "Provide context_deserializer or context_override to restore the original type."
            )
            return (
                dict(serialized),
                _build_context_meta(
                    raw_context_payload,
                    serialized_via="model_dump",
                    requires_deserializer=True,
                    omitted=False,
                ),
            )

        if dataclasses.is_dataclass(raw_context_payload):
            serialized = dataclasses.asdict(cast(Any, raw_context_payload))
            if not isinstance(serialized, Mapping):
                raise UserError("RunState dataclass context must serialize to a mapping.")
            # Dataclass instances serialize to dicts, so reconstruction requires a deserializer.
            logger.warning(
                "RunState context was serialized from a dataclass. "
                "Provide context_deserializer or context_override to restore the original type."
            )
            return (
                dict(serialized),
                _build_context_meta(
                    raw_context_payload,
                    serialized_via="asdict",
                    requires_deserializer=True,
                    omitted=False,
                ),
            )

        # Fall back to an empty dict so the run state remains serializable, but
        # explicitly warn because the original context will be unavailable on restore.
        logger.warning(
            "RunState context of type %s is not serializable; storing empty context. "
            "Provide context_serializer to preserve it.",
            type(raw_context_payload).__name__,
        )
        return (
            {},
            _build_context_meta(
                raw_context_payload,
                serialized_via="omitted",
                requires_deserializer=True,
                omitted=True,
            ),
        )

    def _serialize_tool_input(self, tool_input: Any) -> Any:
        """Normalize tool input for JSON serialization."""
        if tool_input is None:
            return None

        if dataclasses.is_dataclass(tool_input):
            return dataclasses.asdict(cast(Any, tool_input))

        if hasattr(tool_input, "model_dump"):
            try:
                serialized = tool_input.model_dump(exclude_unset=True)
            except TypeError:
                serialized = tool_input.model_dump()
            return _to_dump_compatible(serialized)

        return _to_dump_compatible(tool_input)

    def _merge_generated_items_with_processed(self) -> list[RunItem]:
        """Merge persisted and newly processed items without duplication."""
        generated_items = list(self._generated_items)
        if not (self._last_processed_response and self._last_processed_response.new_items):
            return generated_items

        seen_id_types: set[tuple[str, str]] = set()
        seen_call_ids: set[str] = set()
        seen_call_id_types: set[tuple[str, str]] = set()

        def _id_type_call(item: Any) -> tuple[str | None, str | None, str | None]:
            item_id = None
            item_type = None
            call_id = None
            if hasattr(item, "raw_item"):
                raw = item.raw_item
                if isinstance(raw, dict):
                    item_id = raw.get("id")
                    item_type = raw.get("type")
                    call_id = raw.get("call_id")
                else:
                    item_id = _get_attr(raw, "id")
                    item_type = _get_attr(raw, "type")
                    call_id = _get_attr(raw, "call_id")
            if item_id is None and hasattr(item, "id"):
                item_id = _get_attr(item, "id")
            if item_type is None and hasattr(item, "type"):
                item_type = _get_attr(item, "type")
            return item_id, item_type, call_id

        for existing in generated_items:
            item_id, item_type, call_id = _id_type_call(existing)
            if item_id and item_type:
                seen_id_types.add((item_id, item_type))
            if call_id and item_type:
                seen_call_id_types.add((call_id, item_type))
            elif call_id:
                seen_call_ids.add(call_id)

        for new_item in self._last_processed_response.new_items:
            item_id, item_type, call_id = _id_type_call(new_item)
            if call_id and item_type:
                if (call_id, item_type) in seen_call_id_types:
                    continue
            elif call_id and call_id in seen_call_ids:
                continue
            if item_id and item_type and (item_id, item_type) in seen_id_types:
                continue
            if item_id and item_type:
                seen_id_types.add((item_id, item_type))
            if call_id and item_type:
                seen_call_id_types.add((call_id, item_type))
            elif call_id:
                seen_call_ids.add(call_id)
            generated_items.append(new_item)
        return generated_items

    def to_json(
        self,
        *,
        context_serializer: ContextSerializer | None = None,
        strict_context: bool = False,
        include_tracing_api_key: bool = False,
    ) -> dict[str, Any]:
        """Serializes the run state to a JSON-compatible dictionary.

        This method is used to serialize the run state to a dictionary that can be used to
        resume the run later.

        Args:
            context_serializer: Optional function to serialize non-mapping context values.
            strict_context: When True, require mapping contexts or a context_serializer.
            include_tracing_api_key: When True, include the tracing API key in the trace payload.

        Returns:
            A dictionary representation of the run state.

        Raises:
            UserError: If required state (agent, context) is missing.
        """
        if self._current_agent is None:
            raise UserError("Cannot serialize RunState: No current agent")
        if self._context is None:
            raise UserError("Cannot serialize RunState: No context")

        approvals_dict = self._serialize_approvals()
        model_responses = self._serialize_model_responses()
        original_input_serialized = self._serialize_original_input()
        context_payload, context_meta = self._serialize_context_payload(
            context_serializer=context_serializer,
            strict_context=strict_context,
        )

        context_entry: dict[str, Any] = {
            "usage": serialize_usage(self._context.usage),
            "approvals": approvals_dict,
            "context": context_payload,
            # Preserve metadata so deserialization can warn when context types were erased.
            "context_meta": context_meta,
        }
        tool_input = self._serialize_tool_input(self._context.tool_input)
        if tool_input is not None:
            context_entry["tool_input"] = tool_input

        result = {
            "$schemaVersion": CURRENT_SCHEMA_VERSION,
            "current_turn": self._current_turn,
            "current_agent": {"name": self._current_agent.name},
            "original_input": original_input_serialized,
            "model_responses": model_responses,
            "context": context_entry,
            "tool_use_tracker": copy.deepcopy(self._tool_use_tracker_snapshot),
            "max_turns": self._max_turns,
            "no_active_agent_run": True,
            "input_guardrail_results": _serialize_guardrail_results(self._input_guardrail_results),
            "output_guardrail_results": _serialize_guardrail_results(
                self._output_guardrail_results
            ),
            "tool_input_guardrail_results": _serialize_tool_guardrail_results(
                self._tool_input_guardrail_results, type_label="tool_input"
            ),
            "tool_output_guardrail_results": _serialize_tool_guardrail_results(
                self._tool_output_guardrail_results, type_label="tool_output"
            ),
            "conversation_id": self._conversation_id,
            "previous_response_id": self._previous_response_id,
            "auto_previous_response_id": self._auto_previous_response_id,
        }

        generated_items = self._merge_generated_items_with_processed()
        result["generated_items"] = [self._serialize_item(item) for item in generated_items]
        result["session_items"] = [self._serialize_item(item) for item in list(self._session_items)]
        result["current_step"] = self._serialize_current_step()
        result["last_model_response"] = _serialize_last_model_response(model_responses)
        result["last_processed_response"] = (
            self._serialize_processed_response(self._last_processed_response)
            if self._last_processed_response
            else None
        )
        result["current_turn_persisted_item_count"] = self._current_turn_persisted_item_count
        result["trace"] = self._serialize_trace_data(
            include_tracing_api_key=include_tracing_api_key
        )

        return result

    def _serialize_processed_response(
        self, processed_response: ProcessedResponse
    ) -> dict[str, Any]:
        """Serialize a ProcessedResponse to JSON format.

        Args:
            processed_response: The ProcessedResponse to serialize.

        Returns:
            A dictionary representation of the ProcessedResponse.
        """

        action_groups = _serialize_tool_action_groups(processed_response)

        interruptions_data = [
            _serialize_tool_approval_interruption(interruption, include_tool_name=True)
            for interruption in processed_response.interruptions
            if isinstance(interruption, ToolApprovalItem)
        ]

        return {
            "new_items": [self._serialize_item(item) for item in processed_response.new_items],
            "tools_used": processed_response.tools_used,
            **action_groups,
            "interruptions": interruptions_data,
        }

    def _serialize_current_step(self) -> dict[str, Any] | None:
        """Serialize the current step if it's an interruption."""
        # Import at runtime to avoid circular import
        from .run_internal.run_steps import NextStepInterruption

        if self._current_step is None or not isinstance(self._current_step, NextStepInterruption):
            return None

        interruptions_data = [
            _serialize_tool_approval_interruption(
                item, include_tool_name=item.tool_name is not None
            )
            for item in self._current_step.interruptions
            if isinstance(item, ToolApprovalItem)
        ]

        return {
            "type": "next_step_interruption",
            "data": {
                "interruptions": interruptions_data,
            },
        }

    def _serialize_item(self, item: RunItem) -> dict[str, Any]:
        """Serialize a run item to JSON-compatible dict."""
        raw_item_dict: Any = _serialize_raw_item_value(item.raw_item)

        result: dict[str, Any] = {
            "type": item.type,
            "raw_item": raw_item_dict,
            "agent": {"name": item.agent.name},
        }

        # Add additional fields based on item type
        if hasattr(item, "output"):
            serialized_output = item.output
            try:
                if hasattr(serialized_output, "model_dump"):
                    serialized_output = serialized_output.model_dump(exclude_unset=True)
                elif dataclasses.is_dataclass(serialized_output):
                    serialized_output = dataclasses.asdict(serialized_output)  # type: ignore[arg-type]
                serialized_output = _ensure_json_compatible(serialized_output)
            except Exception:
                serialized_output = str(item.output)
            result["output"] = serialized_output
        if hasattr(item, "source_agent"):
            result["source_agent"] = {"name": item.source_agent.name}
        if hasattr(item, "target_agent"):
            result["target_agent"] = {"name": item.target_agent.name}
        if hasattr(item, "tool_name") and item.tool_name is not None:
            result["tool_name"] = item.tool_name
        if hasattr(item, "description") and item.description is not None:
            result["description"] = item.description

        return result

    def _lookup_function_name(self, call_id: str) -> str:
        """Attempt to find the function name for the provided call_id."""
        if not call_id:
            return ""

        def _extract_name(raw: Any) -> str | None:
            if isinstance(raw, dict):
                candidate_call_id = cast(Optional[str], raw.get("call_id"))
                if candidate_call_id == call_id:
                    name_value = raw.get("name", "")
                    return str(name_value) if name_value else ""
            else:
                candidate_call_id = cast(Optional[str], _get_attr(raw, "call_id"))
                if candidate_call_id == call_id:
                    name_value = _get_attr(raw, "name", "")
                    return str(name_value) if name_value else ""
            return None

        # Search generated items first
        for run_item in self._generated_items:
            if run_item.type != "tool_call_item":
                continue
            name = _extract_name(run_item.raw_item)
            if name is not None:
                return name

        # Inspect last processed response
        if self._last_processed_response is not None:
            for run_item in self._last_processed_response.new_items:
                if run_item.type != "tool_call_item":
                    continue
                name = _extract_name(run_item.raw_item)
                if name is not None:
                    return name

        # Finally, inspect the original input list where the function call originated
        if isinstance(self._original_input, list):
            for input_item in self._original_input:
                if not isinstance(input_item, dict):
                    continue
                if input_item.get("type") != "function_call":
                    continue
                item_call_id = cast(Optional[str], input_item.get("call_id"))
                if item_call_id == call_id:
                    name_value = input_item.get("name", "")
                    return str(name_value) if name_value else ""

        return ""

    def to_string(
        self,
        *,
        context_serializer: ContextSerializer | None = None,
        strict_context: bool = False,
        include_tracing_api_key: bool = False,
    ) -> str:
        """Serializes the run state to a JSON string.

        Args:
            include_tracing_api_key: When True, include the tracing API key in the trace payload.

        Returns:
            JSON string representation of the run state.
        """
        return json.dumps(
            self.to_json(
                context_serializer=context_serializer,
                strict_context=strict_context,
                include_tracing_api_key=include_tracing_api_key,
            ),
            indent=2,
        )

    def set_trace(self, trace: Trace | None) -> None:
        """Capture trace metadata for serialization/resumption."""
        self._trace_state = TraceState.from_trace(trace)

    def _serialize_trace_data(self, *, include_tracing_api_key: bool) -> dict[str, Any] | None:
        if not self._trace_state:
            return None
        return self._trace_state.to_json(include_tracing_api_key=include_tracing_api_key)

    def set_tool_use_tracker_snapshot(self, snapshot: Mapping[str, Sequence[str]] | None) -> None:
        """Store a copy of the serialized tool-use tracker data."""
        if not snapshot:
            self._tool_use_tracker_snapshot = {}
            return

        normalized: dict[str, list[str]] = {}
        for agent_name, tools in snapshot.items():
            if not isinstance(agent_name, str):
                continue
            normalized[agent_name] = [tool for tool in tools if isinstance(tool, str)]
        self._tool_use_tracker_snapshot = normalized

    def get_tool_use_tracker_snapshot(self) -> dict[str, list[str]]:
        """Return a defensive copy of the tool-use tracker snapshot."""
        return {
            agent_name: list(tool_names)
            for agent_name, tool_names in self._tool_use_tracker_snapshot.items()
        }

    @staticmethod
    async def from_string(
        initial_agent: Agent[Any],
        state_string: str,
        *,
        context_override: ContextOverride | None = None,
        context_deserializer: ContextDeserializer | None = None,
        strict_context: bool = False,
    ) -> RunState[Any, Agent[Any]]:
        """Deserializes a run state from a JSON string.

        This method is used to deserialize a run state from a string that was serialized using
        the `to_string()` method.

        Args:
            initial_agent: The initial agent (used to build agent map for resolution).
            state_string: The JSON string to deserialize.
            context_override: Optional context mapping or RunContextWrapper to use instead of the
                serialized context.
            context_deserializer: Optional function to rebuild non-mapping context values.
            strict_context: When True, require a deserializer or override for non-mapping contexts.

        Returns:
            A reconstructed RunState instance.

        Raises:
            UserError: If the string is invalid JSON or has incompatible schema version.
        """
        try:
            state_json = json.loads(state_string)
        except json.JSONDecodeError as e:
            raise UserError(f"Failed to parse run state JSON: {e}") from e

        return await RunState.from_json(
            initial_agent=initial_agent,
            state_json=state_json,
            context_override=context_override,
            context_deserializer=context_deserializer,
            strict_context=strict_context,
        )

    @staticmethod
    async def from_json(
        initial_agent: Agent[Any],
        state_json: dict[str, Any],
        *,
        context_override: ContextOverride | None = None,
        context_deserializer: ContextDeserializer | None = None,
        strict_context: bool = False,
    ) -> RunState[Any, Agent[Any]]:
        """Deserializes a run state from a JSON dictionary.

        This method is used to deserialize a run state from a dict that was created using
        the `to_json()` method.

        Args:
            initial_agent: The initial agent (used to build agent map for resolution).
            state_json: The JSON dictionary to deserialize.
            context_override: Optional context mapping or RunContextWrapper to use instead of the
                serialized context.
            context_deserializer: Optional function to rebuild non-mapping context values.
            strict_context: When True, require a deserializer or override for non-mapping contexts.

        Returns:
            A reconstructed RunState instance.

        Raises:
            UserError: If the dict has incompatible schema version.
        """
        return await _build_run_state_from_json(
            initial_agent=initial_agent,
            state_json=state_json,
            context_override=context_override,
            context_deserializer=context_deserializer,
            strict_context=strict_context,
        )

__init__

__init__(
    context: RunContextWrapper[TContext],
    original_input: str | list[Any],
    starting_agent: TAgent,
    max_turns: int = 10,
    *,
    conversation_id: str | None = None,
    previous_response_id: str | None = None,
    auto_previous_response_id: bool = False,
)

Initialize a new RunState.

Source code in src/agents/run_state.py
def __init__(
    self,
    context: RunContextWrapper[TContext],
    original_input: str | list[Any],
    starting_agent: TAgent,
    max_turns: int = 10,
    *,
    conversation_id: str | None = None,
    previous_response_id: str | None = None,
    auto_previous_response_id: bool = False,
):
    """Initialize a new RunState."""
    self._context = context
    self._original_input = _clone_original_input(original_input)
    self._current_agent = starting_agent
    self._max_turns = max_turns
    self._conversation_id = conversation_id
    self._previous_response_id = previous_response_id
    self._auto_previous_response_id = auto_previous_response_id
    self._model_responses = []
    self._generated_items = []
    self._session_items = []
    self._input_guardrail_results = []
    self._output_guardrail_results = []
    self._tool_input_guardrail_results = []
    self._tool_output_guardrail_results = []
    self._current_step = None
    self._current_turn = 0
    self._last_processed_response = None
    self._current_turn_persisted_item_count = 0
    self._tool_use_tracker_snapshot = {}
    self._trace_state = None
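
The constructor takes the same inputs a runner would start from. A small sketch of building a state by hand, assuming RunContextWrapper can be constructed with just a context value (its exact signature is not shown on this page):

from agents import Agent, RunContextWrapper  # assumed top-level exports
from agents.run_state import RunState

agent = Agent(name="Assistant")
wrapper = RunContextWrapper(context={"user_id": "u_123"})  # assumed constructor shape

state = RunState(
    context=wrapper,
    original_input="Summarize yesterday's incidents",
    starting_agent=agent,
    max_turns=5,
)

assert state.get_interruptions() == []  # a fresh state has no pending approvals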

get_interruptions

get_interruptions() -> list[ToolApprovalItem]

Return pending interruptions if the current step is an interruption.

Source code in src/agents/run_state.py
def get_interruptions(self) -> list[ToolApprovalItem]:
    """Return pending interruptions if the current step is an interruption."""
    # Import at runtime to avoid circular import
    from .run_internal.run_steps import NextStepInterruption

    if self._current_step is None or not isinstance(self._current_step, NextStepInterruption):
        return []
    return self._current_step.interruptions
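
A small sketch of surfacing pending approvals to a reviewer. The tool_name and raw_item attributes on each ToolApprovalItem are inferred from how this module serializes approval items, not from a documented contract:

from agents.run_state import RunState


def describe_pending(state: RunState) -> list[str]:
    """Human-readable summary of tool calls waiting for approval."""
    lines: list[str] = []
    for item in state.get_interruptions():
        name = item.tool_name or "<unknown tool>"  # assumed attribute
        lines.append(f"approval needed: {name} ({type(item.raw_item).__name__})")
    return lines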

approve

approve(
    approval_item: ToolApprovalItem,
    always_approve: bool = False,
) -> None

Approve a tool call and rerun with this state to continue.

Source code in src/agents/run_state.py
def approve(self, approval_item: ToolApprovalItem, always_approve: bool = False) -> None:
    """Approve a tool call and rerun with this state to continue."""
    if self._context is None:
        raise UserError("Cannot approve tool: RunState has no context")
    self._context.approve_tool(approval_item, always_approve=always_approve)

reject

reject(
    approval_item: ToolApprovalItem,
    always_reject: bool = False,
) -> None

Reject a tool call and rerun with this state to continue.

Source code in src/agents/run_state.py
def reject(self, approval_item: ToolApprovalItem, always_reject: bool = False) -> None:
    """Reject a tool call and rerun with this state to continue."""
    if self._context is None:
        raise UserError("Cannot reject tool: RunState has no context")
    self._context.reject_tool(approval_item, always_reject=always_reject)
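
approve and reject only record the decision on the run context; the run actually continues once the state is handed back to the runner. A sketch of applying an allow-list policy, again assuming tool_name is available on each approval item:

from agents.run_state import RunState

ALLOWED_TOOLS = {"search_docs", "send_email"}  # hypothetical tool names


def apply_policy(state: RunState) -> None:
    for item in state.get_interruptions():
        if item.tool_name in ALLOWED_TOOLS:
            state.approve(item, always_approve=True)  # remember the decision for future calls
        else:
            state.reject(item)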

to_json

to_json(
    *,
    context_serializer: ContextSerializer | None = None,
    strict_context: bool = False,
    include_tracing_api_key: bool = False,
) -> dict[str, Any]

Serializes the run state to a JSON-compatible dictionary.

This method is used to serialize the run state to a dictionary that can be used to resume the run later.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `context_serializer` | `ContextSerializer \| None` | Optional function to serialize non-mapping context values. | `None` |
| `strict_context` | `bool` | When True, require mapping contexts or a `context_serializer`. | `False` |
| `include_tracing_api_key` | `bool` | When True, include the tracing API key in the trace payload. | `False` |

Returns:

| Type | Description |
| --- | --- |
| `dict[str, Any]` | A dictionary representation of the run state. |

Raises:

| Type | Description |
| --- | --- |
| `UserError` | If required state (agent, context) is missing. |

Source code in src/agents/run_state.py
def to_json(
    self,
    *,
    context_serializer: ContextSerializer | None = None,
    strict_context: bool = False,
    include_tracing_api_key: bool = False,
) -> dict[str, Any]:
    """Serializes the run state to a JSON-compatible dictionary.

    This method is used to serialize the run state to a dictionary that can be used to
    resume the run later.

    Args:
        context_serializer: Optional function to serialize non-mapping context values.
        strict_context: When True, require mapping contexts or a context_serializer.
        include_tracing_api_key: When True, include the tracing API key in the trace payload.

    Returns:
        A dictionary representation of the run state.

    Raises:
        UserError: If required state (agent, context) is missing.
    """
    if self._current_agent is None:
        raise UserError("Cannot serialize RunState: No current agent")
    if self._context is None:
        raise UserError("Cannot serialize RunState: No context")

    approvals_dict = self._serialize_approvals()
    model_responses = self._serialize_model_responses()
    original_input_serialized = self._serialize_original_input()
    context_payload, context_meta = self._serialize_context_payload(
        context_serializer=context_serializer,
        strict_context=strict_context,
    )

    context_entry: dict[str, Any] = {
        "usage": serialize_usage(self._context.usage),
        "approvals": approvals_dict,
        "context": context_payload,
        # Preserve metadata so deserialization can warn when context types were erased.
        "context_meta": context_meta,
    }
    tool_input = self._serialize_tool_input(self._context.tool_input)
    if tool_input is not None:
        context_entry["tool_input"] = tool_input

    result = {
        "$schemaVersion": CURRENT_SCHEMA_VERSION,
        "current_turn": self._current_turn,
        "current_agent": {"name": self._current_agent.name},
        "original_input": original_input_serialized,
        "model_responses": model_responses,
        "context": context_entry,
        "tool_use_tracker": copy.deepcopy(self._tool_use_tracker_snapshot),
        "max_turns": self._max_turns,
        "no_active_agent_run": True,
        "input_guardrail_results": _serialize_guardrail_results(self._input_guardrail_results),
        "output_guardrail_results": _serialize_guardrail_results(
            self._output_guardrail_results
        ),
        "tool_input_guardrail_results": _serialize_tool_guardrail_results(
            self._tool_input_guardrail_results, type_label="tool_input"
        ),
        "tool_output_guardrail_results": _serialize_tool_guardrail_results(
            self._tool_output_guardrail_results, type_label="tool_output"
        ),
        "conversation_id": self._conversation_id,
        "previous_response_id": self._previous_response_id,
        "auto_previous_response_id": self._auto_previous_response_id,
    }

    generated_items = self._merge_generated_items_with_processed()
    result["generated_items"] = [self._serialize_item(item) for item in generated_items]
    result["session_items"] = [self._serialize_item(item) for item in list(self._session_items)]
    result["current_step"] = self._serialize_current_step()
    result["last_model_response"] = _serialize_last_model_response(model_responses)
    result["last_processed_response"] = (
        self._serialize_processed_response(self._last_processed_response)
        if self._last_processed_response
        else None
    )
    result["current_turn_persisted_item_count"] = self._current_turn_persisted_item_count
    result["trace"] = self._serialize_trace_data(
        include_tracing_api_key=include_tracing_api_key
    )

    return result
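
When the run context is not a plain mapping, to_json falls back to model_dump/asdict with a warning, or stores an empty dict, so passing a context_serializer keeps the payload explicit. A sketch with a hypothetical dataclass context, given a RunState named state whose context holds a MyContext instance; per the source above, the serializer must return a mapping:

from dataclasses import dataclass


@dataclass
class MyContext:  # hypothetical application context
    user_id: str
    locale: str = "en"


def serialize_ctx(ctx: MyContext) -> dict[str, str]:
    return {"user_id": ctx.user_id, "locale": ctx.locale}


payload = state.to_json(context_serializer=serialize_ctx, strict_context=True)
# payload["context"]["context"] now holds the mapping above, and
# payload["context"]["context_meta"] records that a custom serializer was used.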

to_string

to_string(
    *,
    context_serializer: ContextSerializer | None = None,
    strict_context: bool = False,
    include_tracing_api_key: bool = False,
) -> str

Serializes the run state to a JSON string.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `context_serializer` | `ContextSerializer \| None` | Optional function to serialize non-mapping context values (passed through to `to_json()`). | `None` |
| `strict_context` | `bool` | When True, require mapping contexts or a `context_serializer` (passed through to `to_json()`). | `False` |
| `include_tracing_api_key` | `bool` | When True, include the tracing API key in the trace payload. | `False` |

Returns:

| Type | Description |
| --- | --- |
| `str` | JSON string representation of the run state. |

Source code in src/agents/run_state.py
def to_string(
    self,
    *,
    context_serializer: ContextSerializer | None = None,
    strict_context: bool = False,
    include_tracing_api_key: bool = False,
) -> str:
    """Serializes the run state to a JSON string.

    Args:
        include_tracing_api_key: When True, include the tracing API key in the trace payload.

    Returns:
        JSON string representation of the run state.
    """
    return json.dumps(
        self.to_json(
            context_serializer=context_serializer,
            strict_context=strict_context,
            include_tracing_api_key=include_tracing_api_key,
        ),
        indent=2,
    )
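
Since to_string is just to_json wrapped in json.dumps(indent=2), persisting a paused run is a matter of writing that string somewhere durable. A sketch using a local file, given the state and agent from the earlier sketches; from_string is async, so it is driven with asyncio.run here:

import asyncio
from pathlib import Path

from agents.run_state import RunState

checkpoint = Path("run_state.json")  # hypothetical checkpoint location
checkpoint.write_text(state.to_string(), encoding="utf-8")

# Later, in the same or another process:
restored = asyncio.run(
    RunState.from_string(agent, checkpoint.read_text(encoding="utf-8"))
)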

set_trace

set_trace(trace: Trace | None) -> None

Capture trace metadata for serialization/resumption.

Source code in src/agents/run_state.py
def set_trace(self, trace: Trace | None) -> None:
    """Capture trace metadata for serialization/resumption."""
    self._trace_state = TraceState.from_trace(trace)

set_tool_use_tracker_snapshot

set_tool_use_tracker_snapshot(
    snapshot: Mapping[str, Sequence[str]] | None,
) -> None

Store a copy of the serialized tool-use tracker data.

Source code in src/agents/run_state.py
def set_tool_use_tracker_snapshot(self, snapshot: Mapping[str, Sequence[str]] | None) -> None:
    """Store a copy of the serialized tool-use tracker data."""
    if not snapshot:
        self._tool_use_tracker_snapshot = {}
        return

    normalized: dict[str, list[str]] = {}
    for agent_name, tools in snapshot.items():
        if not isinstance(agent_name, str):
            continue
        normalized[agent_name] = [tool for tool in tools if isinstance(tool, str)]
    self._tool_use_tracker_snapshot = normalized

get_tool_use_tracker_snapshot

get_tool_use_tracker_snapshot() -> dict[str, list[str]]

Return a defensive copy of the tool-use tracker snapshot.

Source code in src/agents/run_state.py
def get_tool_use_tracker_snapshot(self) -> dict[str, list[str]]:
    """Return a defensive copy of the tool-use tracker snapshot."""
    return {
        agent_name: list(tool_names)
        for agent_name, tool_names in self._tool_use_tracker_snapshot.items()
    }
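
The snapshot is a plain agent-name to tool-names mapping: the setter normalizes and copies it, and the getter returns a defensive copy, so mutating what you get back cannot corrupt the stored state. A tiny sketch, given a RunState named state:

state.set_tool_use_tracker_snapshot({"Assistant": ["search_docs", "send_email"]})

snapshot = state.get_tool_use_tracker_snapshot()
snapshot["Assistant"].append("drop_tables")  # mutate the returned copy...

# ...the stored snapshot is unaffected.
assert "drop_tables" not in state.get_tool_use_tracker_snapshot()["Assistant"]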

from_string async staticmethod

from_string(
    initial_agent: Agent[Any],
    state_string: str,
    *,
    context_override: ContextOverride | None = None,
    context_deserializer: ContextDeserializer | None = None,
    strict_context: bool = False,
) -> RunState[Any, Agent[Any]]

Deserializes a run state from a JSON string.

This method is used to deserialize a run state from a string that was serialized using the to_string() method.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `initial_agent` | `Agent[Any]` | The initial agent (used to build agent map for resolution). | required |
| `state_string` | `str` | The JSON string to deserialize. | required |
| `context_override` | `ContextOverride \| None` | Optional context mapping or RunContextWrapper to use instead of the serialized context. | `None` |
| `context_deserializer` | `ContextDeserializer \| None` | Optional function to rebuild non-mapping context values. | `None` |
| `strict_context` | `bool` | When True, require a deserializer or override for non-mapping contexts. | `False` |

Returns:

| Type | Description |
| --- | --- |
| `RunState[Any, Agent[Any]]` | A reconstructed RunState instance. |

Raises:

| Type | Description |
| --- | --- |
| `UserError` | If the string is invalid JSON or has incompatible schema version. |

Source code in src/agents/run_state.py
@staticmethod
async def from_string(
    initial_agent: Agent[Any],
    state_string: str,
    *,
    context_override: ContextOverride | None = None,
    context_deserializer: ContextDeserializer | None = None,
    strict_context: bool = False,
) -> RunState[Any, Agent[Any]]:
    """Deserializes a run state from a JSON string.

    This method is used to deserialize a run state from a string that was serialized using
    the `to_string()` method.

    Args:
        initial_agent: The initial agent (used to build agent map for resolution).
        state_string: The JSON string to deserialize.
        context_override: Optional context mapping or RunContextWrapper to use instead of the
            serialized context.
        context_deserializer: Optional function to rebuild non-mapping context values.
        strict_context: When True, require a deserializer or override for non-mapping contexts.

    Returns:
        A reconstructed RunState instance.

    Raises:
        UserError: If the string is invalid JSON or has incompatible schema version.
    """
    try:
        state_json = json.loads(state_string)
    except json.JSONDecodeError as e:
        raise UserError(f"Failed to parse run state JSON: {e}") from e

    return await RunState.from_json(
        initial_agent=initial_agent,
        state_json=state_json,
        context_override=context_override,
        context_deserializer=context_deserializer,
        strict_context=strict_context,
    )
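
If the original context was a dataclass or Pydantic model, the stored payload is a plain mapping, so a context_deserializer (or a context_override) is needed to restore the original type. A sketch reusing the hypothetical MyContext from the to_json example, and assuming the deserializer receives that stored mapping:

from agents import Agent
from agents.run_state import RunState


async def restore(agent: Agent, saved: str) -> RunState:
    return await RunState.from_string(
        agent,
        saved,
        context_deserializer=lambda data: MyContext(**data),  # assumed callable shape
        strict_context=True,
    )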

from_json async staticmethod

from_json(
    initial_agent: Agent[Any],
    state_json: dict[str, Any],
    *,
    context_override: ContextOverride | None = None,
    context_deserializer: ContextDeserializer | None = None,
    strict_context: bool = False,
) -> RunState[Any, Agent[Any]]

Deserializes a run state from a JSON dictionary.

This method is used to deserialize a run state from a dict that was created using the to_json() method.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `initial_agent` | `Agent[Any]` | The initial agent (used to build agent map for resolution). | required |
| `state_json` | `dict[str, Any]` | The JSON dictionary to deserialize. | required |
| `context_override` | `ContextOverride \| None` | Optional context mapping or RunContextWrapper to use instead of the serialized context. | `None` |
| `context_deserializer` | `ContextDeserializer \| None` | Optional function to rebuild non-mapping context values. | `None` |
| `strict_context` | `bool` | When True, require a deserializer or override for non-mapping contexts. | `False` |

Returns:

| Type | Description |
| --- | --- |
| `RunState[Any, Agent[Any]]` | A reconstructed RunState instance. |

Raises:

| Type | Description |
| --- | --- |
| `UserError` | If the dict has incompatible schema version. |

Source code in src/agents/run_state.py
@staticmethod
async def from_json(
    initial_agent: Agent[Any],
    state_json: dict[str, Any],
    *,
    context_override: ContextOverride | None = None,
    context_deserializer: ContextDeserializer | None = None,
    strict_context: bool = False,
) -> RunState[Any, Agent[Any]]:
    """Deserializes a run state from a JSON dictionary.

    This method is used to deserialize a run state from a dict that was created using
    the `to_json()` method.

    Args:
        initial_agent: The initial agent (used to build agent map for resolution).
        state_json: The JSON dictionary to deserialize.
        context_override: Optional context mapping or RunContextWrapper to use instead of the
            serialized context.
        context_deserializer: Optional function to rebuild non-mapping context values.
        strict_context: When True, require a deserializer or override for non-mapping contexts.

    Returns:
        A reconstructed RunState instance.

    Raises:
        UserError: If the dict has incompatible schema version.
    """
    return await _build_run_state_from_json(
        initial_agent=initial_agent,
        state_json=state_json,
        context_override=context_override,
        context_deserializer=context_deserializer,
        strict_context=strict_context,
    )
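
from_json accepts the dictionary form directly, which is convenient when the checkpoint is stored as structured JSON (for example in a document store) rather than as a string. A minimal sketch, reusing the hypothetical checkpoint file from the to_string example:

import asyncio
import json
from pathlib import Path

from agents import Agent
from agents.run_state import RunState

agent = Agent(name="Assistant")
record = json.loads(Path("run_state.json").read_text(encoding="utf-8"))  # a dict, not a str

state = asyncio.run(RunState.from_json(agent, record))
print(state.get_interruptions())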