pydantic_ai.Agent

Bases: Generic[AgentDeps, ResultData]

Class for defining "agents" - a way to have a specific type of "conversation" with an LLM.

Agents are generic in the dependency type they take AgentDeps and the result data type they return, ResultData.

By default, if neither generic parameter is customised, agents have type Agent[None, str].

Minimal usage example:

from pydantic_ai import Agent

agent = Agent('openai:gpt-4o')
result = agent.run_sync('What is the capital of France?')
print(result.data)
#> Paris
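
Both generic parameters can be customised. A minimal sketch (not from the source), assuming the built-in `'test'` model, giving an agent typed `Agent[int, bool]`:

```py
from pydantic_ai import Agent, RunContext

# deps_type and result_type parameterize the agent as Agent[int, bool]
agent = Agent('test', deps_type=int, result_type=bool)

@agent.system_prompt
def threshold_prompt(ctx: RunContext[int]) -> str:
    # ctx.deps is typed as int thanks to deps_type
    return f'The threshold is {ctx.deps}.'

result = agent.run_sync('Is 4 above the threshold?', deps=3)
# result.data is validated against result_type, so it's a bool
```
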
Source code in pydantic_ai_slim/pydantic_ai/agent.py
@final
@dataclass(init=False)
class Agent(Generic[AgentDeps, ResultData]):
    """Class for defining "agents" - a way to have a specific type of "conversation" with an LLM.

    Agents are generic in the dependency type they take [`AgentDeps`][pydantic_ai.dependencies.AgentDeps]
    and the result data type they return, [`ResultData`][pydantic_ai.result.ResultData].

    By default, if neither generic parameter is customised, agents have type `Agent[None, str]`.

    Minimal usage example:

    ```py
    from pydantic_ai import Agent

    agent = Agent('openai:gpt-4o')
    result = agent.run_sync('What is the capital of France?')
    print(result.data)
    #> Paris
    ```
    """

    # dataclass fields mostly for my sanity — knowing what attributes are available
    model: models.Model | models.KnownModelName | None
    """The default model configured for this agent."""
    _result_schema: _result.ResultSchema[ResultData] | None = field(repr=False)
    _result_validators: list[_result.ResultValidator[AgentDeps, ResultData]] = field(repr=False)
    _allow_text_result: bool = field(repr=False)
    _system_prompts: tuple[str, ...] = field(repr=False)
    _function_tools: dict[str, _r.Tool[AgentDeps, Any]] = field(repr=False)
    _default_retries: int = field(repr=False)
    _system_prompt_functions: list[_system_prompt.SystemPromptRunner[AgentDeps]] = field(repr=False)
    _deps_type: type[AgentDeps] = field(repr=False)
    _max_result_retries: int = field(repr=False)
    _current_result_retry: int = field(repr=False)
    _override_deps: _utils.Option[AgentDeps] = field(default=None, repr=False)
    _override_model: _utils.Option[models.Model] = field(default=None, repr=False)
    last_run_messages: list[_messages.Message] | None = None
    """The messages from the last run, useful when a run raised an exception.

    Note: these are not used by the agent (e.g. in future runs); they are just stored for developers' convenience.
    """

    def __init__(
        self,
        model: models.Model | models.KnownModelName | None = None,
        *,
        result_type: type[ResultData] = str,
        system_prompt: str | Sequence[str] = (),
        deps_type: type[AgentDeps] = NoneType,
        retries: int = 1,
        result_tool_name: str = 'final_result',
        result_tool_description: str | None = None,
        result_retries: int | None = None,
        defer_model_check: bool = False,
    ):
        """Create an agent.

        Args:
            model: The default model to use for this agent; if not provided,
                you must provide the model when calling the agent.
            result_type: The type of the result data, used to validate the result data, defaults to `str`.
            system_prompt: Static system prompts to use for this agent, you can also register system
                prompts via a function with [`system_prompt`][pydantic_ai.Agent.system_prompt].
            deps_type: The type used for dependency injection, this parameter exists solely to allow you to fully
                parameterize the agent, and therefore get the best out of static type checking.
                If you're not using deps, but want type checking to pass, you can set `deps=None` to satisfy Pyright
                or add a type hint `: Agent[None, <return type>]`.
            retries: The default number of retries to allow before raising an error.
            result_tool_name: The name of the tool to use for the final result.
            result_tool_description: The description of the final result tool.
            result_retries: The maximum number of retries to allow for result validation, defaults to `retries`.
            defer_model_check: by default, if you provide a [named][pydantic_ai.models.KnownModelName] model,
                it's evaluated to create a [`Model`][pydantic_ai.models.Model] instance immediately,
                which checks for the necessary environment variables. Set this to `True`
                to defer the evaluation until the first run. Useful if you want to
                [override the model][pydantic_ai.Agent.override] for testing.
        """
        if model is None or defer_model_check:
            self.model = model
        else:
            self.model = models.infer_model(model)

        self._result_schema = _result.ResultSchema[result_type].build(
            result_type, result_tool_name, result_tool_description
        )
        # if the result tool is None, or its schema allows `str`, we allow plain text results
        self._allow_text_result = self._result_schema is None or self._result_schema.allow_text_result

        self._system_prompts = (system_prompt,) if isinstance(system_prompt, str) else tuple(system_prompt)
        self._function_tools: dict[str, _r.Tool[AgentDeps, Any]] = {}
        self._deps_type = deps_type
        self._default_retries = retries
        self._system_prompt_functions = []
        self._max_result_retries = result_retries if result_retries is not None else retries
        self._current_result_retry = 0
        self._result_validators = []

    async def run(
        self,
        user_prompt: str,
        *,
        message_history: list[_messages.Message] | None = None,
        model: models.Model | models.KnownModelName | None = None,
        deps: AgentDeps = None,
    ) -> result.RunResult[ResultData]:
        """Run the agent with a user prompt in async mode.

        Args:
            user_prompt: User input to start/continue the conversation.
            message_history: History of the conversation so far.
            model: Optional model to use for this run, required if `model` was not set when creating the agent.
            deps: Optional dependencies to use for this run.

        Returns:
            The result of the run.
        """
        model_used, custom_model, agent_model = await self._get_agent_model(model)

        deps = self._get_deps(deps)

        with _logfire.span(
            'agent run {prompt=}',
            prompt=user_prompt,
            agent=self,
            custom_model=custom_model,
            model_name=model_used.name(),
        ) as run_span:
            new_message_index, messages = await self._prepare_messages(deps, user_prompt, message_history)
            self.last_run_messages = messages

            for tool in self._function_tools.values():
                tool.reset()

            cost = result.Cost()

            run_step = 0
            while True:
                run_step += 1
                with _logfire.span('model request {run_step=}', run_step=run_step) as model_req_span:
                    model_response, request_cost = await agent_model.request(messages)
                    model_req_span.set_attribute('response', model_response)
                    model_req_span.set_attribute('cost', request_cost)
                    model_req_span.message = f'model request -> {model_response.role}'

                messages.append(model_response)
                cost += request_cost

                with _logfire.span('handle model response') as handle_span:
                    either = await self._handle_model_response(model_response, deps)

                    if isinstance(either, _MarkFinalResult):
                        # we have a final result, end the conversation
                        result_data = either.data
                        run_span.set_attribute('all_messages', messages)
                        run_span.set_attribute('cost', cost)
                        handle_span.set_attribute('result', result_data)
                        handle_span.message = 'handle model response -> final result'
                        return result.RunResult(messages, new_message_index, result_data, cost)
                    else:
                        # continue the conversation
                        tool_responses = either
                        handle_span.set_attribute('tool_responses', tool_responses)
                        response_msgs = ' '.join(m.role for m in tool_responses)
                        handle_span.message = f'handle model response -> {response_msgs}'
                        messages.extend(tool_responses)

    def run_sync(
        self,
        user_prompt: str,
        *,
        message_history: list[_messages.Message] | None = None,
        model: models.Model | models.KnownModelName | None = None,
        deps: AgentDeps = None,
    ) -> result.RunResult[ResultData]:
        """Run the agent with a user prompt synchronously.

        This is a convenience method that wraps `self.run` with `loop.run_until_complete()`.

        Args:
            user_prompt: User input to start/continue the conversation.
            message_history: History of the conversation so far.
            model: Optional model to use for this run, required if `model` was not set when creating the agent.
            deps: Optional dependencies to use for this run.

        Returns:
            The result of the run.
        """
        loop = asyncio.get_event_loop()
        return loop.run_until_complete(self.run(user_prompt, message_history=message_history, model=model, deps=deps))

    @asynccontextmanager
    async def run_stream(
        self,
        user_prompt: str,
        *,
        message_history: list[_messages.Message] | None = None,
        model: models.Model | models.KnownModelName | None = None,
        deps: AgentDeps = None,
    ) -> AsyncIterator[result.StreamedRunResult[AgentDeps, ResultData]]:
        """Run the agent with a user prompt in async mode, returning a streamed response.

        Args:
            user_prompt: User input to start/continue the conversation.
            message_history: History of the conversation so far.
            model: Optional model to use for this run, required if `model` was not set when creating the agent.
            deps: Optional dependencies to use for this run.

        Returns:
            The result of the run.
        """
        model_used, custom_model, agent_model = await self._get_agent_model(model)

        deps = self._get_deps(deps)

        with _logfire.span(
            'agent run stream {prompt=}',
            prompt=user_prompt,
            agent=self,
            custom_model=custom_model,
            model_name=model_used.name(),
        ) as run_span:
            new_message_index, messages = await self._prepare_messages(deps, user_prompt, message_history)
            self.last_run_messages = messages

            for tool in self._function_tools.values():
                tool.reset()

            cost = result.Cost()

            run_step = 0
            while True:
                run_step += 1
                with _logfire.span('model request {run_step=}', run_step=run_step) as model_req_span:
                    async with agent_model.request_stream(messages) as model_response:
                        model_req_span.set_attribute('response_type', model_response.__class__.__name__)
                        # We want to end the "model request" span here, but we can't exit the context manager
                        # in the traditional way
                        model_req_span.__exit__(None, None, None)

                        with _logfire.span('handle model response') as handle_span:
                            either = await self._handle_streamed_model_response(model_response, deps)

                            if isinstance(either, _MarkFinalResult):
                                result_stream = either.data
                                run_span.set_attribute('all_messages', messages)
                                handle_span.set_attribute('result_type', result_stream.__class__.__name__)
                                handle_span.message = 'handle model response -> final result'
                                yield result.StreamedRunResult(
                                    messages,
                                    new_message_index,
                                    cost,
                                    result_stream,
                                    self._result_schema,
                                    deps,
                                    self._result_validators,
                                    lambda m: run_span.set_attribute('all_messages', messages),
                                )
                                return
                            else:
                                tool_responses = either
                                handle_span.set_attribute('tool_responses', tool_responses)
                                response_msgs = ' '.join(m.role for m in tool_responses)
                                handle_span.message = f'handle model response -> {response_msgs}'
                                messages.extend(tool_responses)
                                # the model_response should have been fully streamed by now, we can add its cost
                                cost += model_response.cost()

    @contextmanager
    def override(
        self,
        *,
        deps: AgentDeps | _utils.Unset = _utils.UNSET,
        model: models.Model | models.KnownModelName | _utils.Unset = _utils.UNSET,
    ) -> Iterator[None]:
        """Context manager to temporarily override agent dependencies and model.

        This is particularly useful when testing.

        Args:
            deps: The dependencies to use instead of the dependencies passed to the agent run.
            model: The model to use instead of the model passed to the agent run.
        """
        if _utils.is_set(deps):
            override_deps_before = self._override_deps
            self._override_deps = _utils.Some(deps)
        else:
            override_deps_before = _utils.UNSET

        # noinspection PyTypeChecker
        if _utils.is_set(model):
            override_model_before = self._override_model
            # noinspection PyTypeChecker
            self._override_model = _utils.Some(models.infer_model(model))  # pyright: ignore[reportArgumentType]
        else:
            override_model_before = _utils.UNSET

        try:
            yield
        finally:
            if _utils.is_set(override_deps_before):
                self._override_deps = override_deps_before
            if _utils.is_set(override_model_before):
                self._override_model = override_model_before

    @overload
    def system_prompt(
        self, func: Callable[[RunContext[AgentDeps]], str], /
    ) -> Callable[[RunContext[AgentDeps]], str]: ...

    @overload
    def system_prompt(
        self, func: Callable[[RunContext[AgentDeps]], Awaitable[str]], /
    ) -> Callable[[RunContext[AgentDeps]], Awaitable[str]]: ...

    @overload
    def system_prompt(self, func: Callable[[], str], /) -> Callable[[], str]: ...

    @overload
    def system_prompt(self, func: Callable[[], Awaitable[str]], /) -> Callable[[], Awaitable[str]]: ...

    def system_prompt(
        self, func: _system_prompt.SystemPromptFunc[AgentDeps], /
    ) -> _system_prompt.SystemPromptFunc[AgentDeps]:
        """Decorator to register a system prompt function.

        Optionally takes [`RunContext`][pydantic_ai.dependencies.RunContext] as its only argument.
        Can decorate sync or async functions.

        Overloads for every possible signature of `system_prompt` are included so the decorator doesn't obscure
        the type of the function, see `tests/typed_agent.py` for tests.

        Example:
        ```py
        from pydantic_ai import Agent, RunContext

        agent = Agent('test', deps_type=str)

        @agent.system_prompt
        def simple_system_prompt() -> str:
            return 'foobar'

        @agent.system_prompt
        async def async_system_prompt(ctx: RunContext[str]) -> str:
            return f'{ctx.deps} is the best'

        result = agent.run_sync('foobar', deps='spam')
        print(result.data)
        #> success (no tool calls)
        ```
        """
        self._system_prompt_functions.append(_system_prompt.SystemPromptRunner(func))
        return func

    @overload
    def result_validator(
        self, func: Callable[[RunContext[AgentDeps], ResultData], ResultData], /
    ) -> Callable[[RunContext[AgentDeps], ResultData], ResultData]: ...

    @overload
    def result_validator(
        self, func: Callable[[RunContext[AgentDeps], ResultData], Awaitable[ResultData]], /
    ) -> Callable[[RunContext[AgentDeps], ResultData], Awaitable[ResultData]]: ...

    @overload
    def result_validator(self, func: Callable[[ResultData], ResultData], /) -> Callable[[ResultData], ResultData]: ...

    @overload
    def result_validator(
        self, func: Callable[[ResultData], Awaitable[ResultData]], /
    ) -> Callable[[ResultData], Awaitable[ResultData]]: ...

    def result_validator(
        self, func: _result.ResultValidatorFunc[AgentDeps, ResultData], /
    ) -> _result.ResultValidatorFunc[AgentDeps, ResultData]:
        """Decorator to register a result validator function.

        Optionally takes [`RunContext`][pydantic_ai.dependencies.RunContext] as its first argument.
        Can decorate sync or async functions.

        Overloads for every possible signature of `result_validator` are included so the decorator doesn't obscure
        the type of the function, see `tests/typed_agent.py` for tests.

        Example:
        ```py
        from pydantic_ai import Agent, ModelRetry, RunContext

        agent = Agent('test', deps_type=str)

        @agent.result_validator
        def result_validator_simple(data: str) -> str:
            if 'wrong' in data:
                raise ModelRetry('wrong response')
            return data

        @agent.result_validator
        async def result_validator_deps(ctx: RunContext[str], data: str) -> str:
            if ctx.deps in data:
                raise ModelRetry('wrong response')
            return data

        result = agent.run_sync('foobar', deps='spam')
        print(result.data)
        #> success (no tool calls)
        ```
        """
        self._result_validators.append(_result.ResultValidator(func))
        return func

    @overload
    def tool(self, func: ToolContextFunc[AgentDeps, ToolParams], /) -> ToolContextFunc[AgentDeps, ToolParams]: ...

    @overload
    def tool(
        self, /, *, retries: int | None = None
    ) -> Callable[[ToolContextFunc[AgentDeps, ToolParams]], ToolContextFunc[AgentDeps, ToolParams]]: ...

    def tool(
        self,
        func: ToolContextFunc[AgentDeps, ToolParams] | None = None,
        /,
        *,
        retries: int | None = None,
    ) -> Any:
        """Decorator to register a tool function which takes
        [`RunContext`][pydantic_ai.dependencies.RunContext] as its first argument.

        Can decorate sync or async functions.

        The docstring is inspected to extract both the tool description and description of each parameter,
        [learn more](../agents.md#function-tools-and-schema).

        We can't add overloads for every possible signature of tool, since the return type is a recursive union
        so the signature of functions decorated with `@agent.tool` is obscured.

        Example:
        ```py
        from pydantic_ai import Agent, RunContext

        agent = Agent('test', deps_type=int)

        @agent.tool
        def foobar(ctx: RunContext[int], x: int) -> int:
            return ctx.deps + x

        @agent.tool(retries=2)
        async def spam(ctx: RunContext[int], y: float) -> float:
            return ctx.deps + y

        result = agent.run_sync('foobar', deps=1)
        print(result.data)
        #> {"foobar":1,"spam":1.0}
        ```

        Args:
            func: The tool function to register.
            retries: The number of retries to allow for this tool, defaults to the agent's default retries,
                which defaults to 1.
        """  # noqa: D205
        if func is None:

            def tool_decorator(
                func_: ToolContextFunc[AgentDeps, ToolParams],
            ) -> ToolContextFunc[AgentDeps, ToolParams]:
                # noinspection PyTypeChecker
                self._register_tool(_utils.Either(left=func_), retries)
                return func_

            return tool_decorator
        else:
            # noinspection PyTypeChecker
            self._register_tool(_utils.Either(left=func), retries)
            return func

    @overload
    def tool_plain(self, func: ToolPlainFunc[ToolParams], /) -> ToolPlainFunc[ToolParams]: ...

    @overload
    def tool_plain(
        self, /, *, retries: int | None = None
    ) -> Callable[[ToolPlainFunc[ToolParams]], ToolPlainFunc[ToolParams]]: ...

    def tool_plain(self, func: ToolPlainFunc[ToolParams] | None = None, /, *, retries: int | None = None) -> Any:
        """Decorator to register a tool function which DOES NOT take `RunContext` as an argument.

        Can decorate sync or async functions.

        The docstring is inspected to extract both the tool description and description of each parameter,
        [learn more](../agents.md#function-tools-and-schema).

        We can't add overloads for every possible signature of `tool_plain`, since the return type is a recursive union
        so the signature of functions decorated with `@agent.tool_plain` is obscured.

        Example:
        ```py
        from pydantic_ai import Agent

        agent = Agent('test')

        @agent.tool_plain
        def foobar() -> int:
            return 123

        @agent.tool_plain(retries=2)
        async def spam() -> float:
            return 3.14

        result = agent.run_sync('foobar')
        print(result.data)
        #> {"foobar":123,"spam":3.14}
        ```

        Args:
            func: The tool function to register.
            retries: The number of retries to allow for this tool, defaults to the agent's default retries,
                which defaults to 1.
        """
        if func is None:

            def tool_decorator(
                func_: ToolPlainFunc[ToolParams],
            ) -> ToolPlainFunc[ToolParams]:
                # noinspection PyTypeChecker
                self._register_tool(_utils.Either(right=func_), retries)
                return func_

            return tool_decorator
        else:
            self._register_tool(_utils.Either(right=func), retries)
            return func

    def _register_tool(self, func: _r.ToolEitherFunc[AgentDeps, ToolParams], retries: int | None) -> None:
        """Private utility to register a tool function."""
        retries_ = retries if retries is not None else self._default_retries
        tool = _r.Tool[AgentDeps, ToolParams](func, retries_)

        if self._result_schema and tool.name in self._result_schema.tools:
            raise ValueError(f'Tool name conflicts with result schema name: {tool.name!r}')

        if tool.name in self._function_tools:
            raise ValueError(f'Tool name conflicts with existing tool: {tool.name!r}')

        self._function_tools[tool.name] = tool

    async def _get_agent_model(
        self, model: models.Model | models.KnownModelName | None
    ) -> tuple[models.Model, models.Model | None, models.AgentModel]:
        """Create a model configured for this agent.

        Args:
            model: model to use for this run, required if `model` was not set when creating the agent.

        Returns:
            a tuple of `(model used, custom_model if any, agent_model)`
        """
        model_: models.Model
        if some_model := self._override_model:
            # we don't want `override()` to cover up errors from the model not being defined, hence this check
            if model is None and self.model is None:
                raise exceptions.UserError(
                    '`model` must be set either when creating the agent or when calling it. '
                    '(Even when `override(model=...)` is customizing the model that will actually be called)'
                )
            model_ = some_model.value
            custom_model = None
        elif model is not None:
            custom_model = model_ = models.infer_model(model)
        elif self.model is not None:
            # noinspection PyTypeChecker
            model_ = self.model = models.infer_model(self.model)
            custom_model = None
        else:
            raise exceptions.UserError('`model` must be set either when creating the agent or when calling it.')

        result_tools = list(self._result_schema.tools.values()) if self._result_schema else None
        agent_model = await model_.agent_model(self._function_tools, self._allow_text_result, result_tools)
        return model_, custom_model, agent_model

    async def _prepare_messages(
        self, deps: AgentDeps, user_prompt: str, message_history: list[_messages.Message] | None
    ) -> tuple[int, list[_messages.Message]]:
        # if message history includes system prompts, we don't want to regenerate them
        if message_history and any(m.role == 'system' for m in message_history):
            # shallow copy messages
            messages = message_history.copy()
        else:
            messages = await self._init_messages(deps)
            if message_history:
                messages += message_history

        new_message_index = len(messages)
        messages.append(_messages.UserPrompt(user_prompt))
        return new_message_index, messages

    async def _handle_model_response(
        self, model_response: _messages.ModelAnyResponse, deps: AgentDeps
    ) -> _MarkFinalResult[ResultData] | list[_messages.Message]:
        """Process a non-streamed response from the model.

        Returns:
            Return `Either` — left: final result data, right: list of messages to send back to the model.
        """
        if model_response.role == 'model-text-response':
            # plain string response
            if self._allow_text_result:
                result_data_input = cast(ResultData, model_response.content)
                try:
                    result_data = await self._validate_result(result_data_input, deps, None)
                except _result.ToolRetryError as e:
                    self._incr_result_retry()
                    return [e.tool_retry]
                else:
                    return _MarkFinalResult(result_data)
            else:
                self._incr_result_retry()
                response = _messages.RetryPrompt(
                    content='Plain text responses are not permitted, please call one of the functions instead.',
                )
                return [response]
        elif model_response.role == 'model-structured-response':
            if self._result_schema is not None:
                # if there's a result schema, and any of the calls match one of its tools, return the result
                # NOTE: this means we ignore any other tools called here
                if match := self._result_schema.find_tool(model_response):
                    call, result_tool = match
                    try:
                        result_data = result_tool.validate(call)
                        result_data = await self._validate_result(result_data, deps, call)
                    except _result.ToolRetryError as e:
                        self._incr_result_retry()
                        return [e.tool_retry]
                    else:
                        return _MarkFinalResult(result_data)

            if not model_response.calls:
                raise exceptions.UnexpectedModelBehavior('Received empty tool call message')

            # otherwise we run all tool functions in parallel
            messages: list[_messages.Message] = []
            tasks: list[asyncio.Task[_messages.Message]] = []
            for call in model_response.calls:
                if tool := self._function_tools.get(call.tool_name):
                    tasks.append(asyncio.create_task(tool.run(deps, call), name=call.tool_name))
                else:
                    messages.append(self._unknown_tool(call.tool_name))

            with _logfire.span('running {tools=}', tools=[t.get_name() for t in tasks]):
                messages += await asyncio.gather(*tasks)
            return messages
        else:
            assert_never(model_response)

    async def _handle_streamed_model_response(
        self, model_response: models.EitherStreamedResponse, deps: AgentDeps
    ) -> _MarkFinalResult[models.EitherStreamedResponse] | list[_messages.Message]:
        """Process a streamed response from the model.

        TODO: change the response type to `models.EitherStreamedResponse | list[_messages.Message]` once we drop 3.9
        (with 3.9 we get `TypeError: Subscripted generics cannot be used with class and instance checks`)

        Returns:
            Return `Either` — left: final result data, right: list of messages to send back to the model.
        """
        if isinstance(model_response, models.StreamTextResponse):
            # plain string response
            if self._allow_text_result:
                return _MarkFinalResult(model_response)
            else:
                self._incr_result_retry()
                response = _messages.RetryPrompt(
                    content='Plain text responses are not permitted, please call one of the functions instead.',
                )
                # stream the response, so cost is correct
                async for _ in model_response:
                    pass

                return [response]
        else:
            assert isinstance(model_response, models.StreamStructuredResponse), f'Unexpected response: {model_response}'
            if self._result_schema is not None:
                # if there's a result schema, iterate over the stream until we find at least one tool
                # NOTE: this means we ignore any other tools called here
                structured_msg = model_response.get()
                while not structured_msg.calls:
                    try:
                        await model_response.__anext__()
                    except StopAsyncIteration:
                        break
                    structured_msg = model_response.get()

                if self._result_schema.find_tool(structured_msg):
                    return _MarkFinalResult(model_response)

            # the model is calling a tool function, consume the response to get the next message
            async for _ in model_response:
                pass
            structured_msg = model_response.get()
            if not structured_msg.calls:
                raise exceptions.UnexpectedModelBehavior('Received empty tool call message')
            messages: list[_messages.Message] = [structured_msg]

            # we now run all tool functions in parallel
            tasks: list[asyncio.Task[_messages.Message]] = []
            for call in structured_msg.calls:
                if tool := self._function_tools.get(call.tool_name):
                    tasks.append(asyncio.create_task(tool.run(deps, call), name=call.tool_name))
                else:
                    messages.append(self._unknown_tool(call.tool_name))

            with _logfire.span('running {tools=}', tools=[t.get_name() for t in tasks]):
                messages += await asyncio.gather(*tasks)
            return messages

    async def _validate_result(
        self, result_data: ResultData, deps: AgentDeps, tool_call: _messages.ToolCall | None
    ) -> ResultData:
        for validator in self._result_validators:
            result_data = await validator.validate(result_data, deps, self._current_result_retry, tool_call)
        return result_data

    def _incr_result_retry(self) -> None:
        self._current_result_retry += 1
        if self._current_result_retry > self._max_result_retries:
            raise exceptions.UnexpectedModelBehavior(
                f'Exceeded maximum retries ({self._max_result_retries}) for result validation'
            )

    async def _init_messages(self, deps: AgentDeps) -> list[_messages.Message]:
        """Build the initial messages for the conversation."""
        messages: list[_messages.Message] = [_messages.SystemPrompt(p) for p in self._system_prompts]
        for sys_prompt_runner in self._system_prompt_functions:
            prompt = await sys_prompt_runner.run(deps)
            messages.append(_messages.SystemPrompt(prompt))
        return messages

    def _unknown_tool(self, tool_name: str) -> _messages.RetryPrompt:
        self._incr_result_retry()
        names = list(self._function_tools.keys())
        if self._result_schema:
            names.extend(self._result_schema.tool_names())
        if names:
            msg = f'Available tools: {", ".join(names)}'
        else:
            msg = 'No tools available.'
        return _messages.RetryPrompt(content=f'Unknown tool name: {tool_name!r}. {msg}')

    def _get_deps(self, deps: AgentDeps) -> AgentDeps:
        """Get deps for a run.

        If we've overridden deps via `_override_deps`, use that, otherwise use the deps passed to the call.

        We could do runtime type checking of deps against `self._deps_type`, but that's a slippery slope.
        """
        if some_deps := self._override_deps:
            return some_deps.value
        else:
            return deps

__init__

__init__(
    model: Model | KnownModelName | None = None,
    *,
    result_type: type[ResultData] = str,
    system_prompt: str | Sequence[str] = (),
    deps_type: type[AgentDeps] = NoneType,
    retries: int = 1,
    result_tool_name: str = "final_result",
    result_tool_description: str | None = None,
    result_retries: int | None = None,
    defer_model_check: bool = False
)

Create an agent.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `model` | `Model \| KnownModelName \| None` | The default model to use for this agent; if not provided, you must provide the model when calling the agent. | `None` |
| `result_type` | `type[ResultData]` | The type of the result data, used to validate the result data. | `str` |
| `system_prompt` | `str \| Sequence[str]` | Static system prompts to use for this agent; you can also register system prompts via a function with `system_prompt`. | `()` |
| `deps_type` | `type[AgentDeps]` | The type used for dependency injection; this parameter exists solely to allow you to fully parameterize the agent and get the best out of static type checking. If you're not using deps but want type checking to pass, you can set `deps=None` to satisfy Pyright or add a type hint `: Agent[None, <return type>]`. | `NoneType` |
| `retries` | `int` | The default number of retries to allow before raising an error. | `1` |
| `result_tool_name` | `str` | The name of the tool to use for the final result. | `'final_result'` |
| `result_tool_description` | `str \| None` | The description of the final result tool. | `None` |
| `result_retries` | `int \| None` | The maximum number of retries to allow for result validation; defaults to `retries`. | `None` |
| `defer_model_check` | `bool` | By default, if you provide a named model, it's evaluated to create a `Model` instance immediately, which checks for the necessary environment variables; set this to `True` to defer the evaluation until the first run. Useful if you want to override the model for testing. | `False` |
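
A sketch (not from the source) of `defer_model_check` combined with `override` for testing; the `TestModel` import path is an assumption:

```py
from pydantic_ai import Agent
from pydantic_ai.models.test import TestModel  # assumed import path

# defer_model_check=True skips the environment-variable check at construction time
agent = Agent('openai:gpt-4o', defer_model_check=True)

# swap in TestModel so no real API call (and no OPENAI_API_KEY) is needed
with agent.override(model=TestModel()):
    result = agent.run_sync('hello')
```
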
Source code in pydantic_ai_slim/pydantic_ai/agent.py
def __init__(
    self,
    model: models.Model | models.KnownModelName | None = None,
    *,
    result_type: type[ResultData] = str,
    system_prompt: str | Sequence[str] = (),
    deps_type: type[AgentDeps] = NoneType,
    retries: int = 1,
    result_tool_name: str = 'final_result',
    result_tool_description: str | None = None,
    result_retries: int | None = None,
    defer_model_check: bool = False,
):
    """Create an agent.

    Args:
        model: The default model to use for this agent; if not provided,
            you must provide the model when calling the agent.
        result_type: The type of the result data, used to validate the result data, defaults to `str`.
        system_prompt: Static system prompts to use for this agent, you can also register system
            prompts via a function with [`system_prompt`][pydantic_ai.Agent.system_prompt].
        deps_type: The type used for dependency injection, this parameter exists solely to allow you to fully
            parameterize the agent, and therefore get the best out of static type checking.
            If you're not using deps, but want type checking to pass, you can set `deps=None` to satisfy Pyright
            or add a type hint `: Agent[None, <return type>]`.
        retries: The default number of retries to allow before raising an error.
        result_tool_name: The name of the tool to use for the final result.
        result_tool_description: The description of the final result tool.
        result_retries: The maximum number of retries to allow for result validation, defaults to `retries`.
        defer_model_check: by default, if you provide a [named][pydantic_ai.models.KnownModelName] model,
            it's evaluated to create a [`Model`][pydantic_ai.models.Model] instance immediately,
            which checks for the necessary environment variables. Set this to `True`
            to defer the evaluation until the first run. Useful if you want to
            [override the model][pydantic_ai.Agent.override] for testing.
    """
    if model is None or defer_model_check:
        self.model = model
    else:
        self.model = models.infer_model(model)

    self._result_schema = _result.ResultSchema[result_type].build(
        result_type, result_tool_name, result_tool_description
    )
    # if the result tool is None, or its schema allows `str`, we allow plain text results
    self._allow_text_result = self._result_schema is None or self._result_schema.allow_text_result

    self._system_prompts = (system_prompt,) if isinstance(system_prompt, str) else tuple(system_prompt)
    self._function_tools: dict[str, _r.Tool[AgentDeps, Any]] = {}
    self._deps_type = deps_type
    self._default_retries = retries
    self._system_prompt_functions = []
    self._max_result_retries = result_retries if result_retries is not None else retries
    self._current_result_retry = 0
    self._result_validators = []

run async

run(
    user_prompt: str,
    *,
    message_history: list[Message] | None = None,
    model: Model | KnownModelName | None = None,
    deps: AgentDeps = None
) -> RunResult[ResultData]

Run the agent with a user prompt in async mode.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `user_prompt` | `str` | User input to start/continue the conversation. | *required* |
| `message_history` | `list[Message] \| None` | History of the conversation so far. | `None` |
| `model` | `Model \| KnownModelName \| None` | Optional model to use for this run, required if `model` was not set when creating the agent. | `None` |
| `deps` | `AgentDeps` | Optional dependencies to use for this run. | `None` |

Returns:

| Type | Description |
|------|-------------|
| `RunResult[ResultData]` | The result of the run. |
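
For example, adapting the minimal example above to async code:

```py
import asyncio

from pydantic_ai import Agent

agent = Agent('openai:gpt-4o')

async def main():
    result = await agent.run('What is the capital of France?')
    print(result.data)
    #> Paris

asyncio.run(main())
```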

Source code in pydantic_ai_slim/pydantic_ai/agent.py
async def run(
    self,
    user_prompt: str,
    *,
    message_history: list[_messages.Message] | None = None,
    model: models.Model | models.KnownModelName | None = None,
    deps: AgentDeps = None,
) -> result.RunResult[ResultData]:
    """Run the agent with a user prompt in async mode.

    Args:
        user_prompt: User input to start/continue the conversation.
        message_history: History of the conversation so far.
        model: Optional model to use for this run, required if `model` was not set when creating the agent.
        deps: Optional dependencies to use for this run.

    Returns:
        The result of the run.
    """
    model_used, custom_model, agent_model = await self._get_agent_model(model)

    deps = self._get_deps(deps)

    with _logfire.span(
        'agent run {prompt=}',
        prompt=user_prompt,
        agent=self,
        custom_model=custom_model,
        model_name=model_used.name(),
    ) as run_span:
        new_message_index, messages = await self._prepare_messages(deps, user_prompt, message_history)
        self.last_run_messages = messages

        for tool in self._function_tools.values():
            tool.reset()

        cost = result.Cost()

        run_step = 0
        while True:
            run_step += 1
            with _logfire.span('model request {run_step=}', run_step=run_step) as model_req_span:
                model_response, request_cost = await agent_model.request(messages)
                model_req_span.set_attribute('response', model_response)
                model_req_span.set_attribute('cost', request_cost)
                model_req_span.message = f'model request -> {model_response.role}'

            messages.append(model_response)
            cost += request_cost

            with _logfire.span('handle model response') as handle_span:
                either = await self._handle_model_response(model_response, deps)

                if isinstance(either, _MarkFinalResult):
                    # we have a final result, end the conversation
                    result_data = either.data
                    run_span.set_attribute('all_messages', messages)
                    run_span.set_attribute('cost', cost)
                    handle_span.set_attribute('result', result_data)
                    handle_span.message = 'handle model response -> final result'
                    return result.RunResult(messages, new_message_index, result_data, cost)
                else:
                    # continue the conversation
                    tool_responses = either
                    handle_span.set_attribute('tool_responses', tool_responses)
                    response_msgs = ' '.join(m.role for m in tool_responses)
                    handle_span.message = f'handle model response -> {response_msgs}'
                    messages.extend(tool_responses)

run_sync

run_sync(
    user_prompt: str,
    *,
    message_history: list[Message] | None = None,
    model: Model | KnownModelName | None = None,
    deps: AgentDeps = None
) -> RunResult[ResultData]

Run the agent with a user prompt synchronously.

This is a convenience method that wraps self.run with loop.run_until_complete().

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `user_prompt` | `str` | User input to start/continue the conversation. | *required* |
| `message_history` | `list[Message] \| None` | History of the conversation so far. | `None` |
| `model` | `Model \| KnownModelName \| None` | Optional model to use for this run, required if `model` was not set when creating the agent. | `None` |
| `deps` | `AgentDeps` | Optional dependencies to use for this run. | `None` |

Returns:

| Type | Description |
|------|-------------|
| `RunResult[ResultData]` | The result of the run. |
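
A sketch (not from the source) of continuing a conversation across runs, assuming `RunResult.all_messages()` returns the accumulated message history:

```py
from pydantic_ai import Agent

agent = Agent('openai:gpt-4o')

first = agent.run_sync('What is the capital of France?')
# pass the previous messages back in so the model sees the whole conversation
second = agent.run_sync('And of Germany?', message_history=first.all_messages())
print(second.data)
```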

Source code in pydantic_ai_slim/pydantic_ai/agent.py
def run_sync(
    self,
    user_prompt: str,
    *,
    message_history: list[_messages.Message] | None = None,
    model: models.Model | models.KnownModelName | None = None,
    deps: AgentDeps = None,
) -> result.RunResult[ResultData]:
    """Run the agent with a user prompt synchronously.

    This is a convenience method that wraps `self.run` with `loop.run_until_complete()`.

    Args:
        user_prompt: User input to start/continue the conversation.
        message_history: History of the conversation so far.
        model: Optional model to use for this run, required if `model` was not set when creating the agent.
        deps: Optional dependencies to use for this run.

    Returns:
        The result of the run.
    """
    loop = asyncio.get_event_loop()
    return loop.run_until_complete(self.run(user_prompt, message_history=message_history, model=model, deps=deps))

run_stream async

run_stream(
    user_prompt: str,
    *,
    message_history: list[Message] | None = None,
    model: Model | KnownModelName | None = None,
    deps: AgentDeps = None
) -> AsyncIterator[
    StreamedRunResult[AgentDeps, ResultData]
]

Run the agent with a user prompt in async mode, returning a streamed response.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `user_prompt` | `str` | User input to start/continue the conversation. | *required* |
| `message_history` | `list[Message] \| None` | History of the conversation so far. | `None` |
| `model` | `Model \| KnownModelName \| None` | Optional model to use for this run, required if `model` was not set when creating the agent. | `None` |
| `deps` | `AgentDeps` | Optional dependencies to use for this run. | `None` |

Returns:

| Type | Description |
|------|-------------|
| `AsyncIterator[StreamedRunResult[AgentDeps, ResultData]]` | The result of the run. |
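
A usage sketch (not from the source), assuming `StreamedRunResult.stream_text()` yields the response text as it arrives:

```py
import asyncio

from pydantic_ai import Agent

agent = Agent('openai:gpt-4o')

async def main():
    # run_stream is an async context manager yielding a StreamedRunResult
    async with agent.run_stream('What is the capital of France?') as result:
        async for text in result.stream_text():
            print(text)

asyncio.run(main())
```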

Source code in pydantic_ai_slim/pydantic_ai/agent.py
@asynccontextmanager
async def run_stream(
    self,
    user_prompt: str,
    *,
    message_history: list[_messages.Message] | None = None,
    model: models.Model | models.KnownModelName | None = None,
    deps: AgentDeps = None,
) -> AsyncIterator[result.StreamedRunResult[AgentDeps, ResultData]]:
    """Run the agent with a user prompt in async mode, returning a streamed response.

    Args:
        user_prompt: User input to start/continue the conversation.
        message_history: History of the conversation so far.
        model: Optional model to use for this run, required if `model` was not set when creating the agent.
        deps: Optional dependencies to use for this run.

    Returns:
        The result of the run.
    """
    model_used, custom_model, agent_model = await self._get_agent_model(model)

    deps = self._get_deps(deps)

    with _logfire.span(
        'agent run stream {prompt=}',
        prompt=user_prompt,
        agent=self,
        custom_model=custom_model,
        model_name=model_used.name(),
    ) as run_span:
        new_message_index, messages = await self._prepare_messages(deps, user_prompt, message_history)
        self.last_run_messages = messages

        for tool in self._function_tools.values():
            tool.reset()

        cost = result.Cost()

        run_step = 0
        while True:
            run_step += 1
            with _logfire.span('model request {run_step=}', run_step=run_step) as model_req_span:
                async with agent_model.request_stream(messages) as model_response:
                    model_req_span.set_attribute('response_type', model_response.__class__.__name__)
                    # We want to end the "model request" span here, but we can't exit the context manager
                    # in the traditional way
                    model_req_span.__exit__(None, None, None)

                    with _logfire.span('handle model response') as handle_span:
                        either = await self._handle_streamed_model_response(model_response, deps)

                        if isinstance(either, _MarkFinalResult):
                            result_stream = either.data
                            run_span.set_attribute('all_messages', messages)
                            handle_span.set_attribute('result_type', result_stream.__class__.__name__)
                            handle_span.message = 'handle model response -> final result'
                            yield result.StreamedRunResult(
                                messages,
                                new_message_index,
                                cost,
                                result_stream,
                                self._result_schema,
                                deps,
                                self._result_validators,
                                lambda m: run_span.set_attribute('all_messages', messages),
                            )
                            return
                        else:
                            tool_responses = either
                            handle_span.set_attribute('tool_responses', tool_responses)
                            response_msgs = ' '.join(m.role for m in tool_responses)
                            handle_span.message = f'handle model response -> {response_msgs}'
                            messages.extend(tool_responses)
                            # the model_response should have been fully streamed by now, we can add its cost
                            cost += model_response.cost()

model instance-attribute

model: Model | KnownModelName | None

The default model configured for this agent.

override

override(
    *,
    deps: AgentDeps | Unset = UNSET,
    model: Model | KnownModelName | Unset = UNSET
) -> Iterator[None]

Context manager to temporarily override agent dependencies and model.

This is particularly useful when testing.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `deps` | `AgentDeps \| Unset` | The dependencies to use instead of the dependencies passed to the agent run. | `UNSET` |
| `model` | `Model \| KnownModelName \| Unset` | The model to use instead of the model passed to the agent run. | `UNSET` |
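
A sketch (not from the source) of overriding dependencies in a test, assuming the built-in `'test'` model:

```py
from pydantic_ai import Agent

agent = Agent('test', deps_type=int)

# within the context manager, deps passed to runs are replaced with 42
with agent.override(deps=42):
    result = agent.run_sync('hello', deps=0)  # the run actually sees deps == 42
```
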
Source code in pydantic_ai_slim/pydantic_ai/agent.py
@contextmanager
def override(
    self,
    *,
    deps: AgentDeps | _utils.Unset = _utils.UNSET,
    model: models.Model | models.KnownModelName | _utils.Unset = _utils.UNSET,
) -> Iterator[None]:
    """Context manager to temporarily override agent dependencies and model.

    This is particularly useful when testing.

    Args:
        deps: The dependencies to use instead of the dependencies passed to the agent run.
        model: The model to use instead of the model passed to the agent run.
    """
    if _utils.is_set(deps):
        override_deps_before = self._override_deps
        self._override_deps = _utils.Some(deps)
    else:
        override_deps_before = _utils.UNSET

    # noinspection PyTypeChecker
    if _utils.is_set(model):
        override_model_before = self._override_model
        # noinspection PyTypeChecker
        self._override_model = _utils.Some(models.infer_model(model))  # pyright: ignore[reportArgumentType]
    else:
        override_model_before = _utils.UNSET

    try:
        yield
    finally:
        if _utils.is_set(override_deps_before):
            self._override_deps = override_deps_before
        if _utils.is_set(override_model_before):
            self._override_model = override_model_before

last_run_messages class-attribute instance-attribute

last_run_messages: list[Message] | None = None

The messages from the last run, useful when a run raised an exception.

Note: these are not used by the agent (e.g. in future runs); they are just stored for developers' convenience.
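
A sketch (not from the source) of inspecting `last_run_messages` after a failed run:

```py
from pydantic_ai import Agent

agent = Agent('openai:gpt-4o')

try:
    agent.run_sync('hello')
except Exception:
    # the messages exchanged before the failure are still available
    print(agent.last_run_messages)
```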

system_prompt

system_prompt(
    func: Callable[[RunContext[AgentDeps]], str]
) -> Callable[[RunContext[AgentDeps]], str]
system_prompt(
    func: Callable[[RunContext[AgentDeps]], Awaitable[str]]
) -> Callable[[RunContext[AgentDeps]], Awaitable[str]]
system_prompt(func: Callable[[], str]) -> Callable[[], str]
system_prompt(
    func: Callable[[], Awaitable[str]]
) -> Callable[[], Awaitable[str]]

Decorator to register a system prompt function.

Optionally takes RunContext as its only argument. Can decorate sync or async functions.

Overloads for every possible signature of system_prompt are included so the decorator doesn't obscure the type of the function, see tests/typed_agent.py for tests.

Example:

from pydantic_ai import Agent, RunContext

agent = Agent('test', deps_type=str)

@agent.system_prompt
def simple_system_prompt() -> str:
    return 'foobar'

@agent.system_prompt
async def async_system_prompt(ctx: RunContext[str]) -> str:
    return f'{ctx.deps} is the best'

result = agent.run_sync('foobar', deps='spam')
print(result.data)
#> success (no tool calls)

Source code in pydantic_ai_slim/pydantic_ai/agent.py
def system_prompt(
    self, func: _system_prompt.SystemPromptFunc[AgentDeps], /
) -> _system_prompt.SystemPromptFunc[AgentDeps]:
    """Decorator to register a system prompt function.

    Optionally takes [`RunContext`][pydantic_ai.dependencies.RunContext] as its only argument.
    Can decorate sync or async functions.

    Overloads for every possible signature of `system_prompt` are included so the decorator doesn't obscure
    the type of the function, see `tests/typed_agent.py` for tests.

    Example:
    ```py
    from pydantic_ai import Agent, RunContext

    agent = Agent('test', deps_type=str)

    @agent.system_prompt
    def simple_system_prompt() -> str:
        return 'foobar'

    @agent.system_prompt
    async def async_system_prompt(ctx: RunContext[str]) -> str:
        return f'{ctx.deps} is the best'

    result = agent.run_sync('foobar', deps='spam')
    print(result.data)
    #> success (no tool calls)
    ```
    """
    self._system_prompt_functions.append(_system_prompt.SystemPromptRunner(func))
    return func
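
A static system prompt passed to the Agent constructor can sit alongside decorated ones; a minimal sketch (the prompt strings are illustrative):

```py
from pydantic_ai import Agent, RunContext

agent = Agent('test', deps_type=str, system_prompt='Be concise.')

@agent.system_prompt
def personalize(ctx: RunContext[str]) -> str:
    # Both the static and the decorated prompt contribute to the
    # system message sent to the model.
    return f'The user is called {ctx.deps}.'
```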

tool

tool(
    func: (
        ToolContextFunc[AgentDeps, ToolParams] | None
    ) = None,
    /,
    *,
    retries: int | None = None,
) -> Any

Decorator to register a tool function which takes RunContext as its first argument.

Can decorate sync or async functions.

The docstring is inspected to extract both the tool description and description of each parameter; learn more.

We can't add overloads for every possible signature of tool, since the return type is a recursive union, so the signature of functions decorated with @agent.tool is obscured.

Example:

from pydantic_ai import Agent, RunContext

agent = Agent('test', deps_type=int)

@agent.tool
def foobar(ctx: RunContext[int], x: int) -> int:
    return ctx.deps + x

@agent.tool(retries=2)
async def spam(ctx: RunContext[int], y: float) -> float:
    return ctx.deps + y

result = agent.run_sync('foobar', deps=1)
print(result.data)
#> {"foobar":1,"spam":1.0}

Parameters:

func: ToolContextFunc[AgentDeps, ToolParams] | None
    The tool function to register. Default: None.

retries: int | None
    The number of retries to allow for this tool; defaults to the agent's default retries, which defaults to 1. Default: None.
Source code in pydantic_ai_slim/pydantic_ai/agent.py
def tool(
    self,
    func: ToolContextFunc[AgentDeps, ToolParams] | None = None,
    /,
    *,
    retries: int | None = None,
) -> Any:
    """Decorator to register a tool function which takes
    [`RunContext`][pydantic_ai.dependencies.RunContext] as its first argument.

    Can decorate sync or async functions.

    The docstring is inspected to extract both the tool description and description of each parameter,
    [learn more](../agents.md#function-tools-and-schema).

    We can't add overloads for every possible signature of tool, since the return type is a recursive union
    so the signature of functions decorated with `@agent.tool` is obscured.

    Example:
    ```py
    from pydantic_ai import Agent, RunContext

    agent = Agent('test', deps_type=int)

    @agent.tool
    def foobar(ctx: RunContext[int], x: int) -> int:
        return ctx.deps + x

    @agent.tool(retries=2)
    async def spam(ctx: RunContext[int], y: float) -> float:
        return ctx.deps + y

    result = agent.run_sync('foobar', deps=1)
    print(result.data)
    #> {"foobar":1,"spam":1.0}
    ```

    Args:
        func: The tool function to register.
        retries: The number of retries to allow for this tool, defaults to the agent's default retries,
            which defaults to 1.
    """  # noqa: D205
    if func is None:

        def tool_decorator(
            func_: ToolContextFunc[AgentDeps, ToolParams],
        ) -> ToolContextFunc[AgentDeps, ToolParams]:
            # noinspection PyTypeChecker
            self._register_tool(_utils.Either(left=func_), retries)
            return func_

        return tool_decorator
    else:
        # noinspection PyTypeChecker
        self._register_tool(_utils.Either(left=func), retries)
        return func
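
As a complement, a sketch of the docstring inspection described above; the add tool is illustrative, assuming Google-style Args sections are among the recognised formats:

```py
from pydantic_ai import Agent, RunContext

agent = Agent('test', deps_type=int)

@agent.tool
def add(ctx: RunContext[int], x: int) -> int:
    """Add `x` to the number held in deps.

    Args:
        x: The value to add.
    """
    # The first docstring line becomes the tool description; the `Args`
    # entry becomes the description of `x` in the tool's schema.
    return ctx.deps + x
```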

tool_plain

tool_plain(
    *, retries: int | None = None
) -> Callable[
    [ToolPlainFunc[ToolParams]], ToolPlainFunc[ToolParams]
]
tool_plain(
    func: ToolPlainFunc[ToolParams] | None = None,
    /,
    *,
    retries: int | None = None,
) -> Any

Decorator to register a tool function which DOES NOT take RunContext as an argument.

Can decorate sync or async functions.

The docstring is inspected to extract both the tool description and description of each parameter; learn more.

We can't add overloads for every possible signature of tool_plain, since the return type is a recursive union, so the signature of functions decorated with @agent.tool_plain is obscured.

Example:

from pydantic_ai import Agent

agent = Agent('test')

@agent.tool_plain
def foobar() -> int:
    return 123

@agent.tool_plain(retries=2)
async def spam() -> float:
    return 3.14

result = agent.run_sync('foobar')
print(result.data)
#> {"foobar":123,"spam":3.14}

Parameters:

func: ToolPlainFunc[ToolParams] | None
    The tool function to register. Default: None.

retries: int | None
    The number of retries to allow for this tool; defaults to the agent's default retries, which defaults to 1. Default: None.
Source code in pydantic_ai_slim/pydantic_ai/agent.py
def tool_plain(self, func: ToolPlainFunc[ToolParams] | None = None, /, *, retries: int | None = None) -> Any:
    """Decorator to register a tool function which DOES NOT take `RunContext` as an argument.

    Can decorate sync or async functions.

    The docstring is inspected to extract both the tool description and description of each parameter,
    [learn more](../agents.md#function-tools-and-schema).

    We can't add overloads for every possible signature of tool_plain, since the return type is a recursive union
    so the signature of functions decorated with `@agent.tool_plain` is obscured.

    Example:
    ```py
    from pydantic_ai import Agent

    agent = Agent('test')

    @agent.tool_plain
    def foobar() -> int:
        return 123

    @agent.tool_plain(retries=2)
    async def spam() -> float:
        return 3.14

    result = agent.run_sync('foobar')
    print(result.data)
    #> {"foobar":123,"spam":3.14}
    ```

    Args:
        func: The tool function to register.
        retries: The number of retries to allow for this tool, defaults to the agent's default retries,
            which defaults to 1.
    """
    if func is None:

        def tool_decorator(
            func_: ToolPlainFunc[ToolParams],
        ) -> ToolPlainFunc[ToolParams]:
            # noinspection PyTypeChecker
            self._register_tool(_utils.Either(right=func_), retries)
            return func_

        return tool_decorator
    else:
        self._register_tool(_utils.Either(right=func), retries)
        return func

result_validator

Decorator to register a result validator function.

Optionally takes RunContext as its first argument. Can decorate sync or async functions.

Overloads for every possible signature of result_validator are included so the decorator doesn't obscure the type of the function; see tests/typed_agent.py for tests.

Example:

from pydantic_ai import Agent, ModelRetry, RunContext

agent = Agent('test', deps_type=str)

@agent.result_validator
def result_validator_simple(data: str) -> str:
    if 'wrong' in data:
        raise ModelRetry('wrong response')
    return data

@agent.result_validator
async def result_validator_deps(ctx: RunContext[str], data: str) -> str:
    if ctx.deps in data:
        raise ModelRetry('wrong response')
    return data

result = agent.run_sync('foobar', deps='spam')
print(result.data)
#> success (no tool calls)

Source code in pydantic_ai_slim/pydantic_ai/agent.py
def result_validator(
    self, func: _result.ResultValidatorFunc[AgentDeps, ResultData], /
) -> _result.ResultValidatorFunc[AgentDeps, ResultData]:
    """Decorator to register a result validator function.

    Optionally takes [`RunContext`][pydantic_ai.dependencies.RunContext] as its first argument.
    Can decorate sync or async functions.

    Overloads for every possible signature of `result_validator` are included so the decorator doesn't obscure
    the type of the function; see `tests/typed_agent.py` for tests.

    Example:
    ```py
    from pydantic_ai import Agent, ModelRetry, RunContext

    agent = Agent('test', deps_type=str)

    @agent.result_validator
    def result_validator_simple(data: str) -> str:
        if 'wrong' in data:
            raise ModelRetry('wrong response')
        return data

    @agent.result_validator
    async def result_validator_deps(ctx: RunContext[str], data: str) -> str:
        if ctx.deps in data:
            raise ModelRetry('wrong response')
        return data

    result = agent.run_sync('foobar', deps='spam')
    print(result.data)
    #> success (no tool calls)
    ```
    """
    self._result_validators.append(_result.ResultValidator(func))
    return func