diff --git a/.fern/metadata.json b/.fern/metadata.json index 9ffaaee2..8f71ce52 100644 --- a/.fern/metadata.json +++ b/.fern/metadata.json @@ -16,8 +16,8 @@ "skip_validation": true } }, - "originGitCommit": "d1854cf6d560a0e27c9f46c1d83a6d7d9924f045", + "originGitCommit": "0052a020a7becd03b349857664c9f4a89b6c449a", "originGitCommitIsDirty": true, "invokedBy": "manual", - "sdkVersion": "7.0.1" + "sdkVersion": "7.1.2" } \ No newline at end of file diff --git a/.fernignore b/.fernignore index a7f67968..ad351e36 100644 --- a/.fernignore +++ b/.fernignore @@ -49,6 +49,21 @@ src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_func src/deepgram/types/create_key_v1request_one.py src/deepgram/requests/create_key_v1request_one.py +# Backward-compatibility alias shims for the 2026-05-14 listen-provider rename. +# The spec deduplicated AgentV1SettingsAgent[Context]ListenProviderV1/V2/V2LanguageHint +# into a single top-level DeepgramListenProvider* type. These hand-written aliases +# preserve the old public import paths so existing callers keep working. 
+src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider_v1.py +src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider_v2.py +src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider_v2language_hint.py +src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v1.py +src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v2.py +src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider_v1.py +src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider_v2.py +src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider_v2language_hint.py +src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v1.py +src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v2.py + # Package __init__.py files that carry hand-applied legacy alias re-exports for the # above shims. Fern would otherwise regenerate these and strip the legacy entries on # every regen. Frozen to preserve the public-import surface for renamed types/params. 
diff --git a/AGENTS.md b/AGENTS.md index da59ccba..b98aa728 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -27,6 +27,7 @@ Current permanently frozen files: - `src/deepgram/agent/v1/types/agent_v1history_content.py`, `src/deepgram/agent/v1/types/agent_v1history_function_calls.py`, `src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item.py`, `src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content.py`, `src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content_role.py`, `src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls.py`, `src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py` — hand-written compatibility aliases preserving old public Agent History type imports after regen renames - `src/deepgram/agent/v1/requests/agent_v1history_content.py`, `src/deepgram/agent/v1/requests/agent_v1history_function_calls.py`, `src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item.py`, `src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_content.py`, `src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls.py`, `src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py` — hand-written compatibility aliases preserving old public Agent History request-param imports after regen renames - `src/deepgram/types/create_key_v1request_one.py`, `src/deepgram/requests/create_key_v1request_one.py` — hand-written compatibility aliases preserving the old public create-key request imports after the regen rename to `CreateKeyV1Request` +- `src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider_v1.py`, `src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider_v2.py`, `src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider_v2language_hint.py`, 
`src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v1.py`, `src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v2.py`, `src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider_v1.py`, `src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider_v2.py`, `src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider_v2language_hint.py`, `src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v1.py`, `src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v2.py` — hand-written compatibility aliases for the 2026-05-14 spec dedup that consolidated `AgentV1SettingsAgent[Context]ListenProviderV{1,2,V2LanguageHint}` into top-level `DeepgramListenProvider*` types - `src/deepgram/transport_interface.py`, `src/deepgram/transport.py`, `src/deepgram/transports/` — custom transport layer - `tests/custom/test_agent_history.py` — hand-written regression test for Agent History websocket payload parsing - `tests/custom/test_compat_aliases.py` — hand-written regression test for backward-compatible alias imports after regen renames diff --git a/poetry.lock b/poetry.lock index 340fff47..c5795871 100644 --- a/poetry.lock +++ b/poetry.lock @@ -634,13 +634,13 @@ httpx = ">=0.27.0" [[package]] name = "idna" -version = "3.13" +version = "3.15" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.8" files = [ - {file = "idna-3.13-py3-none-any.whl", hash = "sha256:892ea0cde124a99ce773decba204c5552b69c3c67ffd5f232eb7696135bc8bb3"}, - {file = "idna-3.13.tar.gz", hash = "sha256:585ea8fe5d69b9181ec1afba340451fba6ba764af97026f92a91d4eef164a242"}, + {file = "idna-3.15-py3-none-any.whl", hash = "sha256:048adeaf8c2d788c40fee287673ccaa74c24ffd8dcf09ffa555a2fbb59f10ac8"}, + {file = "idna-3.15.tar.gz", hash = "sha256:ca962446ea538f7092a95e057da437618e886f4d349216d2b1e294abfdb65fdc"}, ] [package.extras] @@ -907,133 +907,132 @@ 
testing = ["coverage", "pytest", "pytest-benchmark"] [[package]] name = "propcache" -version = "0.4.1" +version = "0.5.2" description = "Accelerated property cache" optional = true -python-versions = ">=3.9" +python-versions = ">=3.10" files = [ - {file = "propcache-0.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c2d1fa3201efaf55d730400d945b5b3ab6e672e100ba0f9a409d950ab25d7db"}, - {file = "propcache-0.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1eb2994229cc8ce7fe9b3db88f5465f5fd8651672840b2e426b88cdb1a30aac8"}, - {file = "propcache-0.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:66c1f011f45a3b33d7bcb22daed4b29c0c9e2224758b6be00686731e1b46f925"}, - {file = "propcache-0.4.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9a52009f2adffe195d0b605c25ec929d26b36ef986ba85244891dee3b294df21"}, - {file = "propcache-0.4.1-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5d4e2366a9c7b837555cf02fb9be2e3167d333aff716332ef1b7c3a142ec40c5"}, - {file = "propcache-0.4.1-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:9d2b6caef873b4f09e26ea7e33d65f42b944837563a47a94719cc3544319a0db"}, - {file = "propcache-0.4.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b16ec437a8c8a965ecf95739448dd938b5c7f56e67ea009f4300d8df05f32b7"}, - {file = "propcache-0.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:296f4c8ed03ca7476813fe666c9ea97869a8d7aec972618671b33a38a5182ef4"}, - {file = "propcache-0.4.1-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:1f0978529a418ebd1f49dad413a2b68af33f85d5c5ca5c6ca2a3bed375a7ac60"}, - {file = "propcache-0.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fd138803047fb4c062b1c1dd95462f5209456bfab55c734458f15d11da288f8f"}, - {file = "propcache-0.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = 
"sha256:8c9b3cbe4584636d72ff556d9036e0c9317fa27b3ac1f0f558e7e84d1c9c5900"}, - {file = "propcache-0.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f93243fdc5657247533273ac4f86ae106cc6445a0efacb9a1bfe982fcfefd90c"}, - {file = "propcache-0.4.1-cp310-cp310-win32.whl", hash = "sha256:a0ee98db9c5f80785b266eb805016e36058ac72c51a064040f2bc43b61101cdb"}, - {file = "propcache-0.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:1cdb7988c4e5ac7f6d175a28a9aa0c94cb6f2ebe52756a3c0cda98d2809a9e37"}, - {file = "propcache-0.4.1-cp310-cp310-win_arm64.whl", hash = "sha256:d82ad62b19645419fe79dd63b3f9253e15b30e955c0170e5cebc350c1844e581"}, - {file = "propcache-0.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:60a8fda9644b7dfd5dece8c61d8a85e271cb958075bfc4e01083c148b61a7caf"}, - {file = "propcache-0.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c30b53e7e6bda1d547cabb47c825f3843a0a1a42b0496087bb58d8fedf9f41b5"}, - {file = "propcache-0.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6918ecbd897443087a3b7cd978d56546a812517dcaaca51b49526720571fa93e"}, - {file = "propcache-0.4.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3d902a36df4e5989763425a8ab9e98cd8ad5c52c823b34ee7ef307fd50582566"}, - {file = "propcache-0.4.1-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a9695397f85973bb40427dedddf70d8dc4a44b22f1650dd4af9eedf443d45165"}, - {file = "propcache-0.4.1-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2bb07ffd7eaad486576430c89f9b215f9e4be68c4866a96e97db9e97fead85dc"}, - {file = "propcache-0.4.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fd6f30fdcf9ae2a70abd34da54f18da086160e4d7d9251f81f3da0ff84fc5a48"}, - {file = "propcache-0.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:fc38cba02d1acba4e2869eef1a57a43dfbd3d49a59bf90dda7444ec2be6a5570"}, - {file = "propcache-0.4.1-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:67fad6162281e80e882fb3ec355398cf72864a54069d060321f6cd0ade95fe85"}, - {file = "propcache-0.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f10207adf04d08bec185bae14d9606a1444715bc99180f9331c9c02093e1959e"}, - {file = "propcache-0.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e9b0d8d0845bbc4cfcdcbcdbf5086886bc8157aa963c31c777ceff7846c77757"}, - {file = "propcache-0.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:981333cb2f4c1896a12f4ab92a9cc8f09ea664e9b7dbdc4eff74627af3a11c0f"}, - {file = "propcache-0.4.1-cp311-cp311-win32.whl", hash = "sha256:f1d2f90aeec838a52f1c1a32fe9a619fefd5e411721a9117fbf82aea638fe8a1"}, - {file = "propcache-0.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:364426a62660f3f699949ac8c621aad6977be7126c5807ce48c0aeb8e7333ea6"}, - {file = "propcache-0.4.1-cp311-cp311-win_arm64.whl", hash = "sha256:e53f3a38d3510c11953f3e6a33f205c6d1b001129f972805ca9b42fc308bc239"}, - {file = "propcache-0.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e153e9cd40cc8945138822807139367f256f89c6810c2634a4f6902b52d3b4e2"}, - {file = "propcache-0.4.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:cd547953428f7abb73c5ad82cbb32109566204260d98e41e5dfdc682eb7f8403"}, - {file = "propcache-0.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f048da1b4f243fc44f205dfd320933a951b8d89e0afd4c7cacc762a8b9165207"}, - {file = "propcache-0.4.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ec17c65562a827bba85e3872ead335f95405ea1674860d96483a02f5c698fa72"}, - {file = "propcache-0.4.1-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:405aac25c6394ef275dee4c709be43745d36674b223ba4eb7144bf4d691b7367"}, - {file = 
"propcache-0.4.1-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0013cb6f8dde4b2a2f66903b8ba740bdfe378c943c4377a200551ceb27f379e4"}, - {file = "propcache-0.4.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:15932ab57837c3368b024473a525e25d316d8353016e7cc0e5ba9eb343fbb1cf"}, - {file = "propcache-0.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:031dce78b9dc099f4c29785d9cf5577a3faf9ebf74ecbd3c856a7b92768c3df3"}, - {file = "propcache-0.4.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:ab08df6c9a035bee56e31af99be621526bd237bea9f32def431c656b29e41778"}, - {file = "propcache-0.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4d7af63f9f93fe593afbf104c21b3b15868efb2c21d07d8732c0c4287e66b6a6"}, - {file = "propcache-0.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:cfc27c945f422e8b5071b6e93169679e4eb5bf73bbcbf1ba3ae3a83d2f78ebd9"}, - {file = "propcache-0.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:35c3277624a080cc6ec6f847cbbbb5b49affa3598c4535a0a4682a697aaa5c75"}, - {file = "propcache-0.4.1-cp312-cp312-win32.whl", hash = "sha256:671538c2262dadb5ba6395e26c1731e1d52534bfe9ae56d0b5573ce539266aa8"}, - {file = "propcache-0.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:cb2d222e72399fcf5890d1d5cc1060857b9b236adff2792ff48ca2dfd46c81db"}, - {file = "propcache-0.4.1-cp312-cp312-win_arm64.whl", hash = "sha256:204483131fb222bdaaeeea9f9e6c6ed0cac32731f75dfc1d4a567fc1926477c1"}, - {file = "propcache-0.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:43eedf29202c08550aac1d14e0ee619b0430aaef78f85864c1a892294fbc28cf"}, - {file = "propcache-0.4.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d62cdfcfd89ccb8de04e0eda998535c406bf5e060ffd56be6c586cbcc05b3311"}, - {file = "propcache-0.4.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cae65ad55793da34db5f54e4029b89d3b9b9490d8abe1b4c7ab5d4b8ec7ebf74"}, - {file = 
"propcache-0.4.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:333ddb9031d2704a301ee3e506dc46b1fe5f294ec198ed6435ad5b6a085facfe"}, - {file = "propcache-0.4.1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:fd0858c20f078a32cf55f7e81473d96dcf3b93fd2ccdb3d40fdf54b8573df3af"}, - {file = "propcache-0.4.1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:678ae89ebc632c5c204c794f8dab2837c5f159aeb59e6ed0539500400577298c"}, - {file = "propcache-0.4.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d472aeb4fbf9865e0c6d622d7f4d54a4e101a89715d8904282bb5f9a2f476c3f"}, - {file = "propcache-0.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4d3df5fa7e36b3225954fba85589da77a0fe6a53e3976de39caf04a0db4c36f1"}, - {file = "propcache-0.4.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:ee17f18d2498f2673e432faaa71698032b0127ebf23ae5974eeaf806c279df24"}, - {file = "propcache-0.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:580e97762b950f993ae618e167e7be9256b8353c2dcd8b99ec100eb50f5286aa"}, - {file = "propcache-0.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:501d20b891688eb8e7aa903021f0b72d5a55db40ffaab27edefd1027caaafa61"}, - {file = "propcache-0.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a0bd56e5b100aef69bd8562b74b46254e7c8812918d3baa700c8a8009b0af66"}, - {file = "propcache-0.4.1-cp313-cp313-win32.whl", hash = "sha256:bcc9aaa5d80322bc2fb24bb7accb4a30f81e90ab8d6ba187aec0744bc302ad81"}, - {file = "propcache-0.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:381914df18634f5494334d201e98245c0596067504b9372d8cf93f4bb23e025e"}, - {file = "propcache-0.4.1-cp313-cp313-win_arm64.whl", hash = "sha256:8873eb4460fd55333ea49b7d189749ecf6e55bf85080f11b1c4530ed3034cba1"}, - {file = "propcache-0.4.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = 
"sha256:92d1935ee1f8d7442da9c0c4fa7ac20d07e94064184811b685f5c4fada64553b"}, - {file = "propcache-0.4.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:473c61b39e1460d386479b9b2f337da492042447c9b685f28be4f74d3529e566"}, - {file = "propcache-0.4.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:c0ef0aaafc66fbd87842a3fe3902fd889825646bc21149eafe47be6072725835"}, - {file = "propcache-0.4.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95393b4d66bfae908c3ca8d169d5f79cd65636ae15b5e7a4f6e67af675adb0e"}, - {file = "propcache-0.4.1-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c07fda85708bc48578467e85099645167a955ba093be0a2dcba962195676e859"}, - {file = "propcache-0.4.1-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:af223b406d6d000830c6f65f1e6431783fc3f713ba3e6cc8c024d5ee96170a4b"}, - {file = "propcache-0.4.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a78372c932c90ee474559c5ddfffd718238e8673c340dc21fe45c5b8b54559a0"}, - {file = "propcache-0.4.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:564d9f0d4d9509e1a870c920a89b2fec951b44bf5ba7d537a9e7c1ccec2c18af"}, - {file = "propcache-0.4.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:17612831fda0138059cc5546f4d12a2aacfb9e47068c06af35c400ba58ba7393"}, - {file = "propcache-0.4.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:41a89040cb10bd345b3c1a873b2bf36413d48da1def52f268a055f7398514874"}, - {file = "propcache-0.4.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e35b88984e7fa64aacecea39236cee32dd9bd8c55f57ba8a75cf2399553f9bd7"}, - {file = "propcache-0.4.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6f8b465489f927b0df505cbe26ffbeed4d6d8a2bbc61ce90eb074ff129ef0ab1"}, - {file = "propcache-0.4.1-cp313-cp313t-win32.whl", hash = 
"sha256:2ad890caa1d928c7c2965b48f3a3815c853180831d0e5503d35cf00c472f4717"}, - {file = "propcache-0.4.1-cp313-cp313t-win_amd64.whl", hash = "sha256:f7ee0e597f495cf415bcbd3da3caa3bd7e816b74d0d52b8145954c5e6fd3ff37"}, - {file = "propcache-0.4.1-cp313-cp313t-win_arm64.whl", hash = "sha256:929d7cbe1f01bb7baffb33dc14eb5691c95831450a26354cd210a8155170c93a"}, - {file = "propcache-0.4.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3f7124c9d820ba5548d431afb4632301acf965db49e666aa21c305cbe8c6de12"}, - {file = "propcache-0.4.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:c0d4b719b7da33599dfe3b22d3db1ef789210a0597bc650b7cee9c77c2be8c5c"}, - {file = "propcache-0.4.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:9f302f4783709a78240ebc311b793f123328716a60911d667e0c036bc5dcbded"}, - {file = "propcache-0.4.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c80ee5802e3fb9ea37938e7eecc307fb984837091d5fd262bb37238b1ae97641"}, - {file = "propcache-0.4.1-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ed5a841e8bb29a55fb8159ed526b26adc5bdd7e8bd7bf793ce647cb08656cdf4"}, - {file = "propcache-0.4.1-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:55c72fd6ea2da4c318e74ffdf93c4fe4e926051133657459131a95c846d16d44"}, - {file = "propcache-0.4.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8326e144341460402713f91df60ade3c999d601e7eb5ff8f6f7862d54de0610d"}, - {file = "propcache-0.4.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:060b16ae65bc098da7f6d25bf359f1f31f688384858204fe5d652979e0015e5b"}, - {file = "propcache-0.4.1-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:89eb3fa9524f7bec9de6e83cf3faed9d79bffa560672c118a96a171a6f55831e"}, - {file = "propcache-0.4.1-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = 
"sha256:dee69d7015dc235f526fe80a9c90d65eb0039103fe565776250881731f06349f"}, - {file = "propcache-0.4.1-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5558992a00dfd54ccbc64a32726a3357ec93825a418a401f5cc67df0ac5d9e49"}, - {file = "propcache-0.4.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c9b822a577f560fbd9554812526831712c1436d2c046cedee4c3796d3543b144"}, - {file = "propcache-0.4.1-cp314-cp314-win32.whl", hash = "sha256:ab4c29b49d560fe48b696cdcb127dd36e0bc2472548f3bf56cc5cb3da2b2984f"}, - {file = "propcache-0.4.1-cp314-cp314-win_amd64.whl", hash = "sha256:5a103c3eb905fcea0ab98be99c3a9a5ab2de60228aa5aceedc614c0281cf6153"}, - {file = "propcache-0.4.1-cp314-cp314-win_arm64.whl", hash = "sha256:74c1fb26515153e482e00177a1ad654721bf9207da8a494a0c05e797ad27b992"}, - {file = "propcache-0.4.1-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:824e908bce90fb2743bd6b59db36eb4f45cd350a39637c9f73b1c1ea66f5b75f"}, - {file = "propcache-0.4.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:c2b5e7db5328427c57c8e8831abda175421b709672f6cfc3d630c3b7e2146393"}, - {file = "propcache-0.4.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6f6ff873ed40292cd4969ef5310179afd5db59fdf055897e282485043fc80ad0"}, - {file = "propcache-0.4.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:49a2dc67c154db2c1463013594c458881a069fcf98940e61a0569016a583020a"}, - {file = "propcache-0.4.1-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:005f08e6a0529984491e37d8dbc3dd86f84bd78a8ceb5fa9a021f4c48d4984be"}, - {file = "propcache-0.4.1-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5c3310452e0d31390da9035c348633b43d7e7feb2e37be252be6da45abd1abcc"}, - {file = "propcache-0.4.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:4c3c70630930447f9ef1caac7728c8ad1c56bc5015338b20fed0d08ea2480b3a"}, - {file = "propcache-0.4.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8e57061305815dfc910a3634dcf584f08168a8836e6999983569f51a8544cd89"}, - {file = "propcache-0.4.1-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:521a463429ef54143092c11a77e04056dd00636f72e8c45b70aaa3140d639726"}, - {file = "propcache-0.4.1-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:120c964da3fdc75e3731aa392527136d4ad35868cc556fd09bb6d09172d9a367"}, - {file = "propcache-0.4.1-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:d8f353eb14ee3441ee844ade4277d560cdd68288838673273b978e3d6d2c8f36"}, - {file = "propcache-0.4.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ab2943be7c652f09638800905ee1bab2c544e537edb57d527997a24c13dc1455"}, - {file = "propcache-0.4.1-cp314-cp314t-win32.whl", hash = "sha256:05674a162469f31358c30bcaa8883cb7829fa3110bf9c0991fe27d7896c42d85"}, - {file = "propcache-0.4.1-cp314-cp314t-win_amd64.whl", hash = "sha256:990f6b3e2a27d683cb7602ed6c86f15ee6b43b1194736f9baaeb93d0016633b1"}, - {file = "propcache-0.4.1-cp314-cp314t-win_arm64.whl", hash = "sha256:ecef2343af4cc68e05131e45024ba34f6095821988a9d0a02aa7c73fcc448aa9"}, - {file = "propcache-0.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3d233076ccf9e450c8b3bc6720af226b898ef5d051a2d145f7d765e6e9f9bcff"}, - {file = "propcache-0.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:357f5bb5c377a82e105e44bd3d52ba22b616f7b9773714bff93573988ef0a5fb"}, - {file = "propcache-0.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cbc3b6dfc728105b2a57c06791eb07a94229202ea75c59db644d7d496b698cac"}, - {file = "propcache-0.4.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:182b51b421f0501952d938dc0b0eb45246a5b5153c50d42b495ad5fb7517c888"}, - {file = "propcache-0.4.1-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = 
"sha256:4b536b39c5199b96fc6245eb5fb796c497381d3942f169e44e8e392b29c9ebcc"}, - {file = "propcache-0.4.1-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:db65d2af507bbfbdcedb254a11149f894169d90488dd3e7190f7cdcb2d6cd57a"}, - {file = "propcache-0.4.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fd2dbc472da1f772a4dae4fa24be938a6c544671a912e30529984dd80400cd88"}, - {file = "propcache-0.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:daede9cd44e0f8bdd9e6cc9a607fc81feb80fae7a5fc6cecaff0e0bb32e42d00"}, - {file = "propcache-0.4.1-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:71b749281b816793678ae7f3d0d84bd36e694953822eaad408d682efc5ca18e0"}, - {file = "propcache-0.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:0002004213ee1f36cfb3f9a42b5066100c44276b9b72b4e1504cddd3d692e86e"}, - {file = "propcache-0.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:fe49d0a85038f36ba9e3ffafa1103e61170b28e95b16622e11be0a0ea07c6781"}, - {file = "propcache-0.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:99d43339c83aaf4d32bda60928231848eee470c6bda8d02599cc4cebe872d183"}, - {file = "propcache-0.4.1-cp39-cp39-win32.whl", hash = "sha256:a129e76735bc792794d5177069691c3217898b9f5cee2b2661471e52ffe13f19"}, - {file = "propcache-0.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:948dab269721ae9a87fd16c514a0a2c2a1bdb23a9a61b969b0f9d9ee2968546f"}, - {file = "propcache-0.4.1-cp39-cp39-win_arm64.whl", hash = "sha256:5fd37c406dd6dc85aa743e214cef35dc54bbdd1419baac4f6ae5e5b1a2976938"}, - {file = "propcache-0.4.1-py3-none-any.whl", hash = "sha256:af2a6052aeb6cf17d3e46ee169099044fd8224cbaf75c76a2ef596e8163e2237"}, - {file = "propcache-0.4.1.tar.gz", hash = "sha256:f48107a8c637e80362555f37ecf49abe20370e557cc4ab374f04ec4423c97c3d"}, + {file = "propcache-0.5.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d5a81be28596d6559f6131ef33e10200de6e17643b3c74ce03f9eb103be6ae8b"}, + 
{file = "propcache-0.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29cbaac5ea0212663e6845e04b5e188d5a6ae6dd919810ac835bf1d3b42c3f4c"}, + {file = "propcache-0.5.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6bf3be92233808fcd338eba0fb4d0b59ec5772af4f4ecfcec450d1bfc0f8b5eb"}, + {file = "propcache-0.5.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2f8ea531c794b9d6274acd4e8d2c2ebcac590a4361d27482edd3010b79f1325e"}, + {file = "propcache-0.5.2-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:decfca4c79dd53ebab484b00cc4b6717d8c369f86e74aa4ca395a64ac651495e"}, + {file = "propcache-0.5.2-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4621064bbf28fa77ff64dd5d94367c04684c67d3a5bf1dff25f0cd0d98a38f3b"}, + {file = "propcache-0.5.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b96db7141a592cbc968daf1feea83a118e6ab378af4abbc72b248c895414c22d"}, + {file = "propcache-0.5.2-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:1ca071adabaab6e9219924bbe00af821f1ee7de113a9eca1cdc292de3d120f4d"}, + {file = "propcache-0.5.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e4294d04a94dcab1b3bccd8b66d962dcad411a1d19414b2a41d1445f1de32ad0"}, + {file = "propcache-0.5.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:a0e399a2eccb91ed18721f86aa85757727400b6865c89e88934781deb9c8498b"}, + {file = "propcache-0.5.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:823581fd5cb08b12a48bfa11fe962a7916766b6170c17b028fbdf762b85eb9bf"}, + {file = "propcache-0.5.2-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:949c91d1a990cf3b2e8188dfcfb25005e0b834a06c63fa4ef9f360878ce21ecf"}, + {file = "propcache-0.5.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:cc1177027eda740fdb152706bd215a3f124e3eea15afc39f2cb9fe351b50619e"}, + {file = 
"propcache-0.5.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b05d643f944a8c3c4bd86d65ffd87bf3264b617f87791940302bc474d2ff5274"}, + {file = "propcache-0.5.2-cp310-cp310-win32.whl", hash = "sha256:8114f28879e0904748e831c3a7774261bd9e75f49be089f389a76f959dcd13fe"}, + {file = "propcache-0.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:5fcb98e7598b1ee0addab320d90f65b530297a867dbfe9de52ea838077e16e3d"}, + {file = "propcache-0.5.2-cp310-cp310-win_arm64.whl", hash = "sha256:04dc2390d9edbbaef7461f33322555976ffddf0b650a038649d026358714e6c5"}, + {file = "propcache-0.5.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:74b70780220e2dd89175ca24b81b68b67c83db499ae611e7f2313cb329801c78"}, + {file = "propcache-0.5.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a4840ab0ae0216d952f4b53dc6d0b992bfc2bedbfe360bdd9b548bc184c08959"}, + {file = "propcache-0.5.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c6844ba6364fb12f403928a82cfd295ab103a2b315c77c747b2dbe4a41894ea7"}, + {file = "propcache-0.5.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2293949b855ce597f2826452d17c2d545fb5622379c4ea6fdf525e9b8e8a2511"}, + {file = "propcache-0.5.2-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0fd59b5af35f74da48d905dcbad55449ba13be91823cb05a9bd590bbf5b61660"}, + {file = "propcache-0.5.2-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:29f9309a2e42b0d273be006fdb4be2d6c39a47f6f57d8fb1cf9f81481df81b66"}, + {file = "propcache-0.5.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5aaa2b923c1944ac8febd6609cb373540a5563e7cbcb0fd770f75dace2eb817b"}, + {file = "propcache-0.5.2-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:66ea454f095ddf5b6b14f56c064c0941c4788be11e18d2464cf643bf7203ff67"}, + {file = 
"propcache-0.5.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:95f1e3f4760d404b13c9976c0229b2b49a3c8e2c62a9ce92efdd2b11ada75e3f"}, + {file = "propcache-0.5.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:85341b12b9d55bad0bded24cac341bb34289469e03a11f3f583ea1cc1db0326c"}, + {file = "propcache-0.5.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:26a4dca084132874e639895c3135dfad5eb20bae209f62d1aeb31b03e601c3c0"}, + {file = "propcache-0.5.2-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:3b199b9b2b3d6a7edf3183ba8a9a137a22b97f7df525feb5ae1eccf026d2a9c6"}, + {file = "propcache-0.5.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e59bc9e66329185b93dab73f210f1a37f81cb40f321501db8017c9aea15dba27"}, + {file = "propcache-0.5.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:552ffadf6ad409844bc5919c42a0a83d88314cedddaea0e41e80a8b8fffe881f"}, + {file = "propcache-0.5.2-cp311-cp311-win32.whl", hash = "sha256:cd416c1de191973c52ff1a12a57446bfc7642797b282d7caf2162d7d1b8aa9a0"}, + {file = "propcache-0.5.2-cp311-cp311-win_amd64.whl", hash = "sha256:44e488ef40dbb452700b2b1f8188934121f6648f52c295055662d2191959ff82"}, + {file = "propcache-0.5.2-cp311-cp311-win_arm64.whl", hash = "sha256:54adaa85a22078d1e306304a40984dc5be99d599bf3dc0a24dc98f7daeab89ab"}, + {file = "propcache-0.5.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:806719138ecd720339a12410fb9614ac9b2b2d3a5fdf8235d56981c36f4039ba"}, + {file = "propcache-0.5.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:db2b80ea58eab4f86b2beec3cc8b39e8ff9276ac20e96b7cce43c8ae84cd6b5a"}, + {file = "propcache-0.5.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e5cbfac9f61484f7e9f3597775500cd3ebe8274e9b050c38f9525c77c97520bf"}, + {file = "propcache-0.5.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5dbc581d2814337da56222fab8dc5f161cd798a434e49bac27930aaef798e144"}, + {file = 
"propcache-0.5.2-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:857187f381f88c8e2fa2fe56ab94879d011b883d5a2ee5a1b60a8cd2a06846d9"}, + {file = "propcache-0.5.2-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:178b4a2cdaac1818e2bf1c5a99b94383fa73ea5382e032a48dec07dc5668dc42"}, + {file = "propcache-0.5.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6f328175a2cde1f0ff2c4ed8ce968b9dcfb55f3a7153f39e2957ed994da13476"}, + {file = "propcache-0.5.2-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:5671d09a36b06d0fd4a3da0fccbcae360e9b1570924171a15e9e0997f0249fba"}, + {file = "propcache-0.5.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:80168e2ebe4d3ec6599d10ad8f520304ae1cad9b6c5a95372aef1b66b7bfb53a"}, + {file = "propcache-0.5.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:45f11346f884bc47444f6e6647131055844134c3175b629f84952e2b5cd62b64"}, + {file = "propcache-0.5.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8e778ebd44ef4f66ed60a0416b06b489687db264a9c0b3620362f26489492913"}, + {file = "propcache-0.5.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:c0cb9ed24c8964e172768d455a38254c2dd8a552905729ce006cad3d3dda59b1"}, + {file = "propcache-0.5.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:1d1ad32d9d4355e2be65574fd0bfd3677e7066b009cd5b9b2dee8aa6a6393b33"}, + {file = "propcache-0.5.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c80f4ba3e8f00189165999a742ee526ebeccedf6c3f7beb0c7df821e9772435a"}, + {file = "propcache-0.5.2-cp312-cp312-win32.whl", hash = "sha256:8c7972d8f193740d9175f0998ab38717e6cd322d5935c5b0fef8c0d323fd9031"}, + {file = "propcache-0.5.2-cp312-cp312-win_amd64.whl", hash = "sha256:d9ee8826a7d47863a08ac44e1a5f611a462eefc3a194b492da242128bec75b42"}, + {file = "propcache-0.5.2-cp312-cp312-win_arm64.whl", hash = 
"sha256:2800a4a8ead6b28cccd1ec54b59346f0def7922ee1c7598e8499c733cfbb7c84"}, + {file = "propcache-0.5.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:099aaf4b4d1a02265b92a977edf00b5c4f63b3b17ac6de39b0d637c9cac0188a"}, + {file = "propcache-0.5.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:68ce1c44c7a813a7f71ea04315a8c7b330b63db99d059a797a4651bb6f69f117"}, + {file = "propcache-0.5.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:fc299c129490f55f254cd90be0deca4764e36e9a7c08b4aa588479a3bbed3098"}, + {file = "propcache-0.5.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a6ae2198be502c10f09b2516e7b5d019816924bc3183a43ce792a7bd6625e6f4"}, + {file = "propcache-0.5.2-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6041d31504dc1779d700e1edcfb08eea334b357620b06681a4eabb57a74e574e"}, + {file = "propcache-0.5.2-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f7eabc04151c78a9f4d5bbb5f1faf571e4defeb4b585e0fe95b60ff2dbe4d3d7"}, + {file = "propcache-0.5.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4db0ba63d693afd40d249bd93f842b5f144f8fcbb83de05660373bcf30517b1d"}, + {file = "propcache-0.5.2-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:1dbcf7675229b35d31abb6547d8ebc8c27a830ac3f9a794edff6254873ec7c0a"}, + {file = "propcache-0.5.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d310c013aad2c72f1c3f2f8dd3279d460a858c551f97aeb8c63e4693cca7b4d2"}, + {file = "propcache-0.5.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:06187263ddad280d05b4d8a8b3bb7d164cbebd469236544a42e6d9b28ac6a4fa"}, + {file = "propcache-0.5.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3115559b8effafd63b142ea5ed53d63a16ea6469cbc63dce4ee194b42db5d853"}, + {file = "propcache-0.5.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = 
"sha256:c60462af8e6dc30c35407c7237ea908d777b22862bbee27bc4699c0d8bcdc45a"}, + {file = "propcache-0.5.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:40314bca9ac559716fe374094fc81c11dcc34b64fd6c585360f5775690505704"}, + {file = "propcache-0.5.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:cfa21e036ce1e1db2be04ba3b85d2df1bb1702fa01932d984c5464c665228ff4"}, + {file = "propcache-0.5.2-cp313-cp313-win32.whl", hash = "sha256:f156a3529f38063b6dbaf356e15602a7f95f8055b1295a438433a6386f10463d"}, + {file = "propcache-0.5.2-cp313-cp313-win_amd64.whl", hash = "sha256:dfed59d0a5aeb01e242e66ff0300bc4a265a7c05f612d30016f0b60b1017d757"}, + {file = "propcache-0.5.2-cp313-cp313-win_arm64.whl", hash = "sha256:ba338430e87ceb9c8f0cf754de38a9860560261e56c00376debd628698a7364f"}, + {file = "propcache-0.5.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:a592f5f3da71c8691c788c13cb6734b6d17663d2e1cb8caddf0673d01ef8847d"}, + {file = "propcache-0.5.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:6a997d0489e9668a384fcfd5061b857aa5361de73191cac204d04b889cfbbafa"}, + {file = "propcache-0.5.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:10734b5484ea113152ee25a91dccedf81631791805d2c9ccb054958e51842c94"}, + {file = "propcache-0.5.2-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cafca7e56c12bb02ae16d283742bef25a61122e9dab2b5b3f2ccbe589ce32164"}, + {file = "propcache-0.5.2-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f064f8d2b59177878b7615df1735cd8fe3462ed6be8c7b217d17a276489c2b7f"}, + {file = "propcache-0.5.2-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f78abfa8dfc32376fd1aacf597b2f2fbbe0ea751419aee718af5d4f82537ef8c"}, + {file = "propcache-0.5.2-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:f7467da8a9822bf1a55336f877340c5bcbd3c482afc43a99771169f74a26dedc"}, + {file = "propcache-0.5.2-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a6ddc6ac9e25de626c1f129c1b467d7ecd33ce2237d3fd0c4e429feef0a7ee1f"}, + {file = "propcache-0.5.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2f22cbbac9e26a8e864c0985ff1268d5d939d53d9d9411a9824279097e03a2cb"}, + {file = "propcache-0.5.2-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:fc76378c62a0f04d0cd82fbb1a2cd2d7e28fcb40d5873f28a6c44e388aaa2751"}, + {file = "propcache-0.5.2-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:acd2c8edba48e31e58a363b8cf4e5c7db3b04b3f9e371f601df30d9b0d244836"}, + {file = "propcache-0.5.2-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:452b5065457eb9991ec5eb38ff41d6cd4c991c9ac7c531c4d5849ae473a9a13f"}, + {file = "propcache-0.5.2-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:3430bb2bfe1331885c427745a751e774ee679fd4344f80b97bf879815fe8fa55"}, + {file = "propcache-0.5.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:cef6cea3922890dd6c9654971001fa797b526c16ab5e1e46c05fd6f877be7568"}, + {file = "propcache-0.5.2-cp313-cp313t-win32.whl", hash = "sha256:72d61e16dd78228b58c5d47be830ff3da7e5f139abdf0aef9d86cde1c5cf2191"}, + {file = "propcache-0.5.2-cp313-cp313t-win_amd64.whl", hash = "sha256:0958834041a0166d343b8d2cedcd8bcbaeb4fdbe0cf08320c5379f143c3be6e7"}, + {file = "propcache-0.5.2-cp313-cp313t-win_arm64.whl", hash = "sha256:6de8bd93ddde9b992cf2b2e0d796d501a19026b5b9fd87356d7d0779531a8d96"}, + {file = "propcache-0.5.2-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:46088abff4cba581dea21ae0467a480526cb25aa5f3c269e909f800328bc3999"}, + {file = "propcache-0.5.2-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:fc88b26f08d634f7bc819a7852e5214f5802641ab8d9fd5326892292eee1993e"}, + {file = "propcache-0.5.2-cp314-cp314-macosx_11_0_arm64.whl", hash = 
"sha256:97797ebb098e670a2f92dd66f32897e30d7615b14e7f59711de23e30a9072539"}, + {file = "propcache-0.5.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba57fffe4ac99c5d30076161b5866336d97600769bad35cc68f7774b15298a4e"}, + {file = "propcache-0.5.2-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:583c19759d9eec1e5b69e2fbef36a7d9c326041be9746cb822d335c8cedc2979"}, + {file = "propcache-0.5.2-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d0326e2e5e1f3163fa306c834e48e8d490e5fae607a097a40c0648109b47ba80"}, + {file = "propcache-0.5.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e00820e192c8dbebcafb383ebbf99030895f09905e7a0eb2e0340a0bcc2bc825"}, + {file = "propcache-0.5.2-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c66afea89b1e43725731d2004732a046fe6fe955d51f952c3e95a7314a284a39"}, + {file = "propcache-0.5.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:d4dc37dec6c6cdad0b57881a5658fd14fbf53e333b1a86cf86559f190e1d9ec4"}, + {file = "propcache-0.5.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:5570dbcc97571c15f68068e529c92715a12f8d54030e272d264b377e22bd17a5"}, + {file = "propcache-0.5.2-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:f814362777a9f841adddb200ecdf8f5cb1e5a3c4b7a86378edbd6ccb26edd702"}, + {file = "propcache-0.5.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:196913dea116aeb5a2ba95af4ddcb7ea85559ae07d8eee8751688310d09168c3"}, + {file = "propcache-0.5.2-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:6e7b8719005dd1175be4ab1cd25e9b98659a5e0347331506ec6760d2773a7fb5"}, + {file = "propcache-0.5.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:51f96d685ab16e88cab128cd37a52c5da540809c8b879fa047731bfcb4ad35a4"}, + {file = "propcache-0.5.2-cp314-cp314-win32.whl", hash = 
"sha256:cc6fc3cc62e8501d3ed62894425040d2728ecddb1ed072737a5c70bd537aa9f0"}, + {file = "propcache-0.5.2-cp314-cp314-win_amd64.whl", hash = "sha256:81e3a30b0bb60caa22033dd0f8a3618d1d67356212514f62c57db75cb0ef410c"}, + {file = "propcache-0.5.2-cp314-cp314-win_arm64.whl", hash = "sha256:0d2c9bf8528f135dbb805ce027567e09164f7efa51a2be07458a2c0420f292d0"}, + {file = "propcache-0.5.2-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:4bc8ff1feffc6a61c7002ffe84634c41b822e104990ae009f44a0834430070bb"}, + {file = "propcache-0.5.2-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:79aa3ff0a9b566633b642fa9caf7e21ed1c13d6feca718187873f199e1514078"}, + {file = "propcache-0.5.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1b31822f4474c4036bae62de9402710051d431a606d6a0f907fec79935a071aa"}, + {file = "propcache-0.5.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:13fef48778b5a2a756523fdb781326b028ca75e32858b04f2cdd19f394564917"}, + {file = "propcache-0.5.2-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8b73ab70f1a3351fbc71f663b3e645af6dd0329100c353081cf69c37433fc6fe"}, + {file = "propcache-0.5.2-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5538d2c13d93e4698af7e092b57bc7298fd35d1d58e656ae18f23ee0d0378e03"}, + {file = "propcache-0.5.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cd645f03898405cabe694fb8bc35241e3a9c332ec85627584fe3de201452b335"}, + {file = "propcache-0.5.2-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a473b3440261e0c60706e732b2ed2f517857344fc21bf48fdfe211e2d98eb285"}, + {file = "propcache-0.5.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7afa37062e6650640e932e4cc9297d81f9f42d9944029cc386b8247dea4da837"}, + {file = "propcache-0.5.2-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = 
"sha256:8a90efd5777e996e42d568db9ac740b944d691e565cbfd31b2f7832f9184b2b8"}, + {file = "propcache-0.5.2-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:f19bb891234d72535764d703bfed1153cc34f4214d5bd7150aee1eec9e8f4366"}, + {file = "propcache-0.5.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:32775082acd2d807ee3db715c7770d38767b817870acfa08c29e057f3c4d5b56"}, + {file = "propcache-0.5.2-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:9282fb1a3bccd038da9f768b927b24a0c753e466c086b7c4f3c6982851eefb2d"}, + {file = "propcache-0.5.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cc49723e2f60d6b32a0f0b08a3fd6d13203c07f1cd9566cfce0f12a917c967a2"}, + {file = "propcache-0.5.2-cp314-cp314t-win32.whl", hash = "sha256:2d7aa89ebca5acc98cba9d1472d976e394782f587bad6661003602a619fd1821"}, + {file = "propcache-0.5.2-cp314-cp314t-win_amd64.whl", hash = "sha256:d447bb0b3054be5818458fbb171208b1d9ff11eba14e18ca18b90cbb45767370"}, + {file = "propcache-0.5.2-cp314-cp314t-win_arm64.whl", hash = "sha256:fe67a3d11cd9b4efabfa45c3d00ffba2b26811442a73a581a94b67c2b5faccf6"}, + {file = "propcache-0.5.2-py3-none-any.whl", hash = "sha256:be1ddfcbb376e3de5d2e2db1d58d6d67463e6b4f9f040c000de8e300295465fe"}, + {file = "propcache-0.5.2.tar.gz", hash = "sha256:01c4fc7480cd0598bb4b57022df55b9ca296da7fc5a8760bd8451a7e63a7d427"}, ] [[package]] @@ -1283,13 +1282,13 @@ six = ">=1.5" [[package]] name = "requests" -version = "2.33.1" +version = "2.34.1" description = "Python HTTP for Humans." 
optional = false python-versions = ">=3.10" files = [ - {file = "requests-2.33.1-py3-none-any.whl", hash = "sha256:4e6d1ef462f3626a1f0a0a9c42dd93c63bad33f9f1c1937509b8c5c8718ab56a"}, - {file = "requests-2.33.1.tar.gz", hash = "sha256:18817f8c57c6263968bc123d237e3b8b08ac046f5456bd1e307ee8f4250d3517"}, + {file = "requests-2.34.1-py3-none-any.whl", hash = "sha256:bf38a3ff993960d3dd819c08862c40b3c703306eb7c744fcd9f4ddbb95b548f0"}, + {file = "requests-2.34.1.tar.gz", hash = "sha256:0fc5669f2b69704449fe1552360bd2a73a54512dfd03e65529157f1513322beb"}, ] [package.dependencies] @@ -1398,24 +1397,24 @@ files = [ [[package]] name = "types-python-dateutil" -version = "2.9.0.20260408" +version = "2.9.0.20260508" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.10" files = [ - {file = "types_python_dateutil-2.9.0.20260408-py3-none-any.whl", hash = "sha256:473139d514a71c9d1fbd8bb328974bedcb1cc3dba57aad04ffa4157f483c216f"}, - {file = "types_python_dateutil-2.9.0.20260408.tar.gz", hash = "sha256:8b056ec01568674235f64ecbcef928972a5fac412f5aab09c516dfa2acfbb582"}, + {file = "types_python_dateutil-2.9.0.20260508-py3-none-any.whl", hash = "sha256:bfc6fd2d81aa86e5ac97206a64304f6bd247426eedbca9b98619bbc48c6a1c10"}, + {file = "types_python_dateutil-2.9.0.20260508.tar.gz", hash = "sha256:596a6d63d81f587bf04c8254fb78df9d2344e915ce67948d7400512e3a6206d5"}, ] [[package]] name = "types-requests" -version = "2.33.0.20260503" +version = "2.33.0.20260513" description = "Typing stubs for requests" optional = false python-versions = ">=3.10" files = [ - {file = "types_requests-2.33.0.20260503-py3-none-any.whl", hash = "sha256:02aaa7e3577a13471715bb1bddb693cc985ea514f754b503bf033e6a09a3e528"}, - {file = "types_requests-2.33.0.20260503.tar.gz", hash = "sha256:9721b2d9dbee7131f2fb39f20f0ebb1999c18cef4b512c9a7932f3722de7c5f4"}, + {file = "types_requests-2.33.0.20260513-py3-none-any.whl", hash = 
"sha256:d5a965f9d18b6e06b72039a69565de9027e58f36a7f709857da747fbe7521122"}, + {file = "types_requests-2.33.0.20260513.tar.gz", hash = "sha256:bd845450e954e751373d5d33526742592f298808a3ee3bda7e858e46b839b57f"}, ] [package.dependencies] @@ -1448,13 +1447,13 @@ typing-extensions = ">=4.12.0" [[package]] name = "urllib3" -version = "2.6.3" +version = "2.7.0" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" files = [ - {file = "urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4"}, - {file = "urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed"}, + {file = "urllib3-2.7.0-py3-none-any.whl", hash = "sha256:9fb4c81ebbb1ce9531cce37674bbc6f1360472bc18ca9a553ede278ef7276897"}, + {file = "urllib3-2.7.0.tar.gz", hash = "sha256:231e0ec3b63ceb14667c67be60f2f2c40a518cb38b03af60abc813da26505f4c"}, ] [package.extras] diff --git a/src/deepgram/__init__.py b/src/deepgram/__init__.py index 8eb79918..bab6a21c 100644 --- a/src/deepgram/__init__.py +++ b/src/deepgram/__init__.py @@ -46,6 +46,9 @@ CreateProjectDistributionCredentialsV1ResponseMember, CreateProjectInviteV1Response, Deepgram, + DeepgramListenProviderV1, + DeepgramListenProviderV2, + DeepgramListenProviderV2LanguageHint, DeepgramSpeakProviderModel, DeleteAgentConfigurationV1Response, DeleteAgentVariableV1Response, @@ -273,6 +276,9 @@ CreateProjectDistributionCredentialsV1ResponseMemberParams, CreateProjectDistributionCredentialsV1ResponseParams, CreateProjectInviteV1ResponseParams, + DeepgramListenProviderV1Params, + DeepgramListenProviderV2LanguageHintParams, + DeepgramListenProviderV2Params, DeepgramParams, DeleteProjectInviteV1ResponseParams, DeleteProjectKeyV1ResponseParams, @@ -485,6 +491,12 @@ "Deepgram": ".types", "DeepgramClient": ".client", "DeepgramClientEnvironment": ".environment", + 
"DeepgramListenProviderV1": ".types", + "DeepgramListenProviderV1Params": ".requests", + "DeepgramListenProviderV2": ".types", + "DeepgramListenProviderV2LanguageHint": ".types", + "DeepgramListenProviderV2LanguageHintParams": ".requests", + "DeepgramListenProviderV2Params": ".requests", "DeepgramParams": ".requests", "DeepgramSpeakProviderModel": ".types", "DefaultAioHttpClient": "._default_clients", @@ -923,6 +935,12 @@ def __dir__(): "Deepgram", "DeepgramClient", "DeepgramClientEnvironment", + "DeepgramListenProviderV1", + "DeepgramListenProviderV1Params", + "DeepgramListenProviderV2", + "DeepgramListenProviderV2LanguageHint", + "DeepgramListenProviderV2LanguageHintParams", + "DeepgramListenProviderV2Params", "DeepgramParams", "DeepgramSpeakProviderModel", "DefaultAioHttpClient", diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent.py index 45e9ac45..01bffc21 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent.py +++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent.py @@ -1,7 +1,6 @@ # This file was auto-generated by Fern from our API Definition. 
import typing_extensions - from .agent_v1settings_agent_context import AgentV1SettingsAgentContextParams from .agent_v1settings_agent_listen import AgentV1SettingsAgentListenParams from .agent_v1settings_agent_speak import AgentV1SettingsAgentSpeakParams diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context.py index fbbf448c..b399ef99 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context.py +++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context.py @@ -4,8 +4,8 @@ import typing_extensions from .agent_v1settings_agent_context_context import AgentV1SettingsAgentContextContextParams -from .agent_v1settings_agent_context_messages_item import AgentV1SettingsAgentContextMessagesItemParams from .agent_v1settings_agent_context_listen import AgentV1SettingsAgentContextListenParams +from .agent_v1settings_agent_context_messages_item import AgentV1SettingsAgentContextMessagesItemParams from .agent_v1settings_agent_context_speak import AgentV1SettingsAgentContextSpeakParams from .agent_v1settings_agent_context_think import AgentV1SettingsAgentContextThinkParams diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider.py index 7d5496dc..9f63b22a 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider.py +++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider.py @@ -5,9 +5,7 @@ import typing import typing_extensions -from .agent_v1settings_agent_context_listen_provider_v2language_hint import ( - AgentV1SettingsAgentContextListenProviderV2LanguageHintParams, -) +from ....requests.deepgram_listen_provider_v2language_hint import DeepgramListenProviderV2LanguageHintParams class AgentV1SettingsAgentContextListenProvider_V1Params(typing_extensions.TypedDict): @@ -23,7 +21,7 @@ class 
AgentV1SettingsAgentContextListenProvider_V2Params(typing_extensions.Typed version: typing.Literal["v2"] type: typing.Literal["deepgram"] model: str - language_hint: typing_extensions.NotRequired[AgentV1SettingsAgentContextListenProviderV2LanguageHintParams] + language_hint: typing_extensions.NotRequired[DeepgramListenProviderV2LanguageHintParams] keyterms: typing_extensions.NotRequired[typing.Sequence[str]] diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider_v1.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider_v1.py index 3769303c..3bdc0e70 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider_v1.py +++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider_v1.py @@ -1,32 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -import typing +from ....requests.deepgram_listen_provider_v1 import DeepgramListenProviderV1Params -import typing_extensions - - -class AgentV1SettingsAgentContextListenProviderV1Params(typing_extensions.TypedDict): - type: typing.Literal["deepgram"] - """ - Provider type for speech-to-text - """ - - model: typing_extensions.NotRequired[str] - """ - Model to use for speech to text using the V1 API (e.g. Nova-3, Nova-2) - """ - - language: typing_extensions.NotRequired[str] - """ - Language code to use for speech-to-text. Can be a BCP-47 language tag (e.g. 
`en`), or `multi` for code-switching transcription - """ - - keyterms: typing_extensions.NotRequired[typing.Sequence[str]] - """ - Prompt keyterm recognition to improve Keyword Recall Rate - """ - - smart_format: typing_extensions.NotRequired[bool] - """ - Applies smart formatting to improve transcript readability - """ +AgentV1SettingsAgentContextListenProviderV1Params = DeepgramListenProviderV1Params diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider_v2.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider_v2.py index de1e286b..508fd4c1 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider_v2.py +++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider_v2.py @@ -1,30 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -import typing +from ....requests.deepgram_listen_provider_v2 import DeepgramListenProviderV2Params -import typing_extensions -from .agent_v1settings_agent_context_listen_provider_v2language_hint import ( - AgentV1SettingsAgentContextListenProviderV2LanguageHintParams, -) - - -class AgentV1SettingsAgentContextListenProviderV2Params(typing_extensions.TypedDict): - type: typing.Literal["deepgram"] - """ - Provider type for speech-to-text - """ - - model: str - """ - Model to use for speech to text using the V2 API (e.g. flux-general-en, flux-general-multi) - """ - - language_hint: typing_extensions.NotRequired[AgentV1SettingsAgentContextListenProviderV2LanguageHintParams] - """ - One or more BCP-47 language codes to bias the model toward specific languages. Only supported when model is flux-general-multi. Without hints, the model auto-detects the spoken language. See the Language Prompting guide for details. 
- """ - - keyterms: typing_extensions.NotRequired[typing.Sequence[str]] - """ - Prompt keyterm recognition to improve Keyword Recall Rate - """ +AgentV1SettingsAgentContextListenProviderV2Params = DeepgramListenProviderV2Params diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider_v2language_hint.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider_v2language_hint.py index 9b359c1f..88a2fc39 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider_v2language_hint.py +++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_listen_provider_v2language_hint.py @@ -1,5 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -import typing +from ....requests.deepgram_listen_provider_v2language_hint import DeepgramListenProviderV2LanguageHintParams -AgentV1SettingsAgentContextListenProviderV2LanguageHintParams = typing.Union[str, typing.Sequence[str]] +AgentV1SettingsAgentContextListenProviderV2LanguageHintParams = DeepgramListenProviderV2LanguageHintParams diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider.py index 1b5b47e8..4f16ce88 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider.py +++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider.py @@ -5,6 +5,7 @@ import typing import typing_extensions +from ....requests.deepgram_listen_provider_v2language_hint import DeepgramListenProviderV2LanguageHintParams class AgentV1SettingsAgentListenProvider_V1Params(typing_extensions.TypedDict): @@ -20,6 +21,7 @@ class AgentV1SettingsAgentListenProvider_V2Params(typing_extensions.TypedDict): version: typing.Literal["v2"] type: typing.Literal["deepgram"] model: str + language_hint: typing_extensions.NotRequired[DeepgramListenProviderV2LanguageHintParams] keyterms: 
typing_extensions.NotRequired[typing.Sequence[str]] diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v1.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v1.py index 0fd4e61d..2c8233da 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v1.py +++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v1.py @@ -1,32 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -import typing +from ....requests.deepgram_listen_provider_v1 import DeepgramListenProviderV1Params -import typing_extensions - - -class AgentV1SettingsAgentListenProviderV1Params(typing_extensions.TypedDict): - type: typing.Literal["deepgram"] - """ - Provider type for speech-to-text - """ - - model: typing_extensions.NotRequired[str] - """ - Model to use for speech to text using the V1 API (e.g. Nova-3, Nova-2) - """ - - language: typing_extensions.NotRequired[str] - """ - Language code to use for speech-to-text. Can be a BCP-47 language tag (e.g. `en`), or `multi` for code-switching transcription - """ - - keyterms: typing_extensions.NotRequired[typing.Sequence[str]] - """ - Prompt keyterm recognition to improve Keyword Recall Rate - """ - - smart_format: typing_extensions.NotRequired[bool] - """ - Applies smart formatting to improve transcript readability - """ +AgentV1SettingsAgentListenProviderV1Params = DeepgramListenProviderV1Params diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v2.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v2.py index 0e0e5c5a..13804411 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v2.py +++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v2.py @@ -1,22 +1,5 @@ # This file was auto-generated by Fern from our API Definition. 
-import typing +from ....requests.deepgram_listen_provider_v2 import DeepgramListenProviderV2Params -import typing_extensions - - -class AgentV1SettingsAgentListenProviderV2Params(typing_extensions.TypedDict): - type: typing.Literal["deepgram"] - """ - Provider type for speech-to-text - """ - - model: str - """ - Model to use for speech to text using the V2 API (e.g. flux-general-en) - """ - - keyterms: typing_extensions.NotRequired[typing.Sequence[str]] - """ - Prompt keyterm recognition to improve Keyword Recall Rate - """ +AgentV1SettingsAgentListenProviderV2Params = DeepgramListenProviderV2Params diff --git a/src/deepgram/agent/v1/settings/think/models/raw_client.py b/src/deepgram/agent/v1/settings/think/models/raw_client.py index 2680fe95..1e779edb 100644 --- a/src/deepgram/agent/v1/settings/think/models/raw_client.py +++ b/src/deepgram/agent/v1/settings/think/models/raw_client.py @@ -36,7 +36,7 @@ def list( """ _response = self._client_wrapper.httpx_client.request( "v1/agent/settings/think/models", - base_url=self._client_wrapper.get_environment().base, + base_url=self._client_wrapper.get_environment().agent, method="GET", request_options=request_options, ) @@ -93,7 +93,7 @@ async def list( """ _response = await self._client_wrapper.httpx_client.request( "v1/agent/settings/think/models", - base_url=self._client_wrapper.get_environment().base, + base_url=self._client_wrapper.get_environment().agent, method="GET", request_options=request_options, ) diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_context.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_context.py index a3cec52d..776a2936 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_context.py +++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_context.py @@ -6,8 +6,8 @@ from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel from .agent_v1settings_agent_context_context import 
AgentV1SettingsAgentContextContext -from .agent_v1settings_agent_context_messages_item import AgentV1SettingsAgentContextMessagesItem from .agent_v1settings_agent_context_listen import AgentV1SettingsAgentContextListen +from .agent_v1settings_agent_context_messages_item import AgentV1SettingsAgentContextMessagesItem from .agent_v1settings_agent_context_speak import AgentV1SettingsAgentContextSpeak from .agent_v1settings_agent_context_think import AgentV1SettingsAgentContextThink diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider.py index af80f43c..0f8de520 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider.py +++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider.py @@ -8,9 +8,7 @@ import typing_extensions from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel, UnionMetadata -from .agent_v1settings_agent_context_listen_provider_v2language_hint import ( - AgentV1SettingsAgentContextListenProviderV2LanguageHint, -) +from ....types.deepgram_listen_provider_v2language_hint import DeepgramListenProviderV2LanguageHint class AgentV1SettingsAgentContextListenProvider_V1(UncheckedBaseModel): @@ -35,7 +33,7 @@ class AgentV1SettingsAgentContextListenProvider_V2(UncheckedBaseModel): version: typing.Literal["v2"] = "v2" type: typing.Literal["deepgram"] = "deepgram" model: str - language_hint: typing.Optional[AgentV1SettingsAgentContextListenProviderV2LanguageHint] = None + language_hint: typing.Optional[DeepgramListenProviderV2LanguageHint] = None keyterms: typing.Optional[typing.List[str]] = None if IS_PYDANTIC_V2: diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider_v1.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider_v1.py index 895d48d5..3d0223a1 100644 --- 
a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider_v1.py +++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider_v1.py @@ -1,43 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -import typing +from ....types.deepgram_listen_provider_v1 import DeepgramListenProviderV1 -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel - - -class AgentV1SettingsAgentContextListenProviderV1(UncheckedBaseModel): - type: typing.Literal["deepgram"] = pydantic.Field(default="deepgram") - """ - Provider type for speech-to-text - """ - - model: typing.Optional[str] = pydantic.Field(default=None) - """ - Model to use for speech to text using the V1 API (e.g. Nova-3, Nova-2) - """ - - language: typing.Optional[str] = pydantic.Field(default=None) - """ - Language code to use for speech-to-text. Can be a BCP-47 language tag (e.g. `en`), or `multi` for code-switching transcription - """ - - keyterms: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - Prompt keyterm recognition to improve Keyword Recall Rate - """ - - smart_format: typing.Optional[bool] = pydantic.Field(default=None) - """ - Applies smart formatting to improve transcript readability - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow +AgentV1SettingsAgentContextListenProviderV1 = DeepgramListenProviderV1 diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider_v2.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider_v2.py index f01caf99..b7b01cad 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider_v2.py +++ 
b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider_v2.py @@ -1,43 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -import typing +from ....types.deepgram_listen_provider_v2 import DeepgramListenProviderV2 -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1settings_agent_context_listen_provider_v2language_hint import ( - AgentV1SettingsAgentContextListenProviderV2LanguageHint, -) - - -class AgentV1SettingsAgentContextListenProviderV2(UncheckedBaseModel): - type: typing.Literal["deepgram"] = pydantic.Field(default="deepgram") - """ - Provider type for speech-to-text - """ - - model: str = pydantic.Field() - """ - Model to use for speech to text using the V2 API (e.g. flux-general-en, flux-general-multi) - """ - - language_hint: typing.Optional[AgentV1SettingsAgentContextListenProviderV2LanguageHint] = pydantic.Field( - default=None - ) - """ - One or more BCP-47 language codes to bias the model toward specific languages. Only supported when model is flux-general-multi. Without hints, the model auto-detects the spoken language. See the Language Prompting guide for details. 
- """ - - keyterms: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - Prompt keyterm recognition to improve Keyword Recall Rate - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow +AgentV1SettingsAgentContextListenProviderV2 = DeepgramListenProviderV2 diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider_v2language_hint.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider_v2language_hint.py index 2e3e5e22..3c103b2b 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider_v2language_hint.py +++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_listen_provider_v2language_hint.py @@ -1,5 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -import typing +from ....types.deepgram_listen_provider_v2language_hint import DeepgramListenProviderV2LanguageHint -AgentV1SettingsAgentContextListenProviderV2LanguageHint = typing.Union[str, typing.List[str]] +AgentV1SettingsAgentContextListenProviderV2LanguageHint = DeepgramListenProviderV2LanguageHint diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider.py index 9800b27d..02878ddd 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider.py +++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider.py @@ -8,6 +8,7 @@ import typing_extensions from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel, UnionMetadata +from ....types.deepgram_listen_provider_v2language_hint import DeepgramListenProviderV2LanguageHint class AgentV1SettingsAgentListenProvider_V1(UncheckedBaseModel): @@ -32,6 +33,7 @@ 
class AgentV1SettingsAgentListenProvider_V2(UncheckedBaseModel): version: typing.Literal["v2"] = "v2" type: typing.Literal["deepgram"] = "deepgram" model: str + language_hint: typing.Optional[DeepgramListenProviderV2LanguageHint] = None keyterms: typing.Optional[typing.List[str]] = None if IS_PYDANTIC_V2: diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v1.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v1.py index 3370c965..712f20f0 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v1.py +++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v1.py @@ -1,43 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -import typing +from ....types.deepgram_listen_provider_v1 import DeepgramListenProviderV1 -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel - - -class AgentV1SettingsAgentListenProviderV1(UncheckedBaseModel): - type: typing.Literal["deepgram"] = pydantic.Field(default="deepgram") - """ - Provider type for speech-to-text - """ - - model: typing.Optional[str] = pydantic.Field(default=None) - """ - Model to use for speech to text using the V1 API (e.g. Nova-3, Nova-2) - """ - - language: typing.Optional[str] = pydantic.Field(default=None) - """ - Language code to use for speech-to-text. Can be a BCP-47 language tag (e.g. 
`en`), or `multi` for code-switching transcription - """ - - keyterms: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - Prompt keyterm recognition to improve Keyword Recall Rate - """ - - smart_format: typing.Optional[bool] = pydantic.Field(default=None) - """ - Applies smart formatting to improve transcript readability - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow +AgentV1SettingsAgentListenProviderV1 = DeepgramListenProviderV1 diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v2.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v2.py index 5975d23e..9f9b75f5 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v2.py +++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v2.py @@ -1,33 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -import typing +from ....types.deepgram_listen_provider_v2 import DeepgramListenProviderV2 -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel - - -class AgentV1SettingsAgentListenProviderV2(UncheckedBaseModel): - type: typing.Literal["deepgram"] = pydantic.Field(default="deepgram") - """ - Provider type for speech-to-text - """ - - model: str = pydantic.Field() - """ - Model to use for speech to text using the V2 API (e.g. 
flux-general-en) - """ - - keyterms: typing.Optional[typing.List[str]] = pydantic.Field(default=None) - """ - Prompt keyterm recognition to improve Keyword Recall Rate - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow +AgentV1SettingsAgentListenProviderV2 = DeepgramListenProviderV2 diff --git a/src/deepgram/core/client_wrapper.py b/src/deepgram/core/client_wrapper.py index 80d2e267..9fd68149 100644 --- a/src/deepgram/core/client_wrapper.py +++ b/src/deepgram/core/client_wrapper.py @@ -28,12 +28,12 @@ def get_headers(self) -> typing.Dict[str, str]: import platform headers: typing.Dict[str, str] = { - "User-Agent": "deepgram-sdk/7.0.1", + "User-Agent": "deepgram-sdk/7.1.2", "X-Fern-Language": "Python", "X-Fern-Runtime": f"python/{platform.python_version()}", "X-Fern-Platform": f"{platform.system().lower()}/{platform.release()}", "X-Fern-SDK-Name": "deepgram-sdk", - "X-Fern-SDK-Version": "7.0.1", + "X-Fern-SDK-Version": "7.1.2", **(self.get_custom_headers() or {}), } headers["Authorization"] = f"Token {self.api_key}" diff --git a/src/deepgram/environment.py b/src/deepgram/environment.py index e514ef69..240d7ca4 100644 --- a/src/deepgram/environment.py +++ b/src/deepgram/environment.py @@ -7,15 +7,15 @@ class DeepgramClientEnvironment: PRODUCTION: DeepgramClientEnvironment AGENT: DeepgramClientEnvironment - def __init__(self, *, base: str, agent: str, production: str): + def __init__(self, *, base: str, production: str, agent: str): self.base = base - self.agent = agent self.production = production + self.agent = agent DeepgramClientEnvironment.PRODUCTION = DeepgramClientEnvironment( - base="https://api.deepgram.com", agent="wss://agent.deepgram.com", production="wss://api.deepgram.com" + base="https://api.deepgram.com", production="wss://api.deepgram.com", 
agent="wss://agent.deepgram.com" ) DeepgramClientEnvironment.AGENT = DeepgramClientEnvironment( - base="https://agent.deepgram.com", agent="wss://agent.deepgram.com", production="wss://api.deepgram.com" + base="https://agent.deepgram.com", production="wss://api.deepgram.com", agent="wss://agent.deepgram.com" ) diff --git a/src/deepgram/listen/v1/__init__.py b/src/deepgram/listen/v1/__init__.py index 8b43c4ff..c8e01a2f 100644 --- a/src/deepgram/listen/v1/__init__.py +++ b/src/deepgram/listen/v1/__init__.py @@ -29,6 +29,7 @@ MediaTranscribeRequestCallbackMethod, MediaTranscribeRequestCustomIntentMode, MediaTranscribeRequestCustomTopicMode, + MediaTranscribeRequestDiarizeModel, MediaTranscribeRequestEncoding, MediaTranscribeRequestModel, MediaTranscribeRequestSummarize, @@ -84,6 +85,7 @@ "MediaTranscribeRequestCallbackMethod": ".media", "MediaTranscribeRequestCustomIntentMode": ".media", "MediaTranscribeRequestCustomTopicMode": ".media", + "MediaTranscribeRequestDiarizeModel": ".media", "MediaTranscribeRequestEncoding": ".media", "MediaTranscribeRequestModel": ".media", "MediaTranscribeRequestSummarize": ".media", @@ -148,6 +150,7 @@ def __dir__(): "MediaTranscribeRequestCallbackMethod", "MediaTranscribeRequestCustomIntentMode", "MediaTranscribeRequestCustomTopicMode", + "MediaTranscribeRequestDiarizeModel", "MediaTranscribeRequestEncoding", "MediaTranscribeRequestModel", "MediaTranscribeRequestSummarize", diff --git a/src/deepgram/listen/v1/media/__init__.py b/src/deepgram/listen/v1/media/__init__.py index 495ed32d..12216674 100644 --- a/src/deepgram/listen/v1/media/__init__.py +++ b/src/deepgram/listen/v1/media/__init__.py @@ -10,6 +10,7 @@ MediaTranscribeRequestCallbackMethod, MediaTranscribeRequestCustomIntentMode, MediaTranscribeRequestCustomTopicMode, + MediaTranscribeRequestDiarizeModel, MediaTranscribeRequestEncoding, MediaTranscribeRequestModel, MediaTranscribeRequestSummarize, @@ -21,6 +22,7 @@ "MediaTranscribeRequestCallbackMethod": ".types", 
"MediaTranscribeRequestCustomIntentMode": ".types", "MediaTranscribeRequestCustomTopicMode": ".types", + "MediaTranscribeRequestDiarizeModel": ".types", "MediaTranscribeRequestEncoding": ".types", "MediaTranscribeRequestModel": ".types", "MediaTranscribeRequestSummarize": ".types", @@ -55,6 +57,7 @@ def __dir__(): "MediaTranscribeRequestCallbackMethod", "MediaTranscribeRequestCustomIntentMode", "MediaTranscribeRequestCustomTopicMode", + "MediaTranscribeRequestDiarizeModel", "MediaTranscribeRequestEncoding", "MediaTranscribeRequestModel", "MediaTranscribeRequestSummarize", diff --git a/src/deepgram/listen/v1/media/client.py b/src/deepgram/listen/v1/media/client.py index df47e724..2fb36847 100644 --- a/src/deepgram/listen/v1/media/client.py +++ b/src/deepgram/listen/v1/media/client.py @@ -8,6 +8,7 @@ from .types.media_transcribe_request_callback_method import MediaTranscribeRequestCallbackMethod from .types.media_transcribe_request_custom_intent_mode import MediaTranscribeRequestCustomIntentMode from .types.media_transcribe_request_custom_topic_mode import MediaTranscribeRequestCustomTopicMode +from .types.media_transcribe_request_diarize_model import MediaTranscribeRequestDiarizeModel from .types.media_transcribe_request_encoding import MediaTranscribeRequestEncoding from .types.media_transcribe_request_model import MediaTranscribeRequestModel from .types.media_transcribe_request_summarize import MediaTranscribeRequestSummarize @@ -52,6 +53,7 @@ def transcribe_url( detect_entities: typing.Optional[bool] = None, detect_language: typing.Optional[bool] = None, diarize: typing.Optional[bool] = None, + diarize_model: typing.Optional[MediaTranscribeRequestDiarizeModel] = None, dictation: typing.Optional[bool] = None, encoding: typing.Optional[MediaTranscribeRequestEncoding] = None, filler_words: typing.Optional[bool] = None, @@ -127,6 +129,9 @@ def transcribe_url( diarize : typing.Optional[bool] Recognize speaker changes. 
Each word in the transcript will be assigned a speaker number starting at 0 + diarize_model : typing.Optional[MediaTranscribeRequestDiarizeModel] + Select and enable a specific batch diarization model version. If specifying this parameter, you should not set the deprecated `diarize=true` parameter. Not accepted on streaming requests. + dictation : typing.Optional[bool] Dictation mode for controlling formatting with dictated speech @@ -221,6 +226,7 @@ def transcribe_url( detect_entities=True, detect_language=True, diarize=True, + diarize_model="latest", dictation=True, encoding="linear16", filler_words=True, @@ -262,6 +268,7 @@ def transcribe_url( detect_entities=detect_entities, detect_language=detect_language, diarize=diarize, + diarize_model=diarize_model, dictation=dictation, encoding=encoding, filler_words=filler_words, @@ -306,6 +313,7 @@ def transcribe_file( detect_entities: typing.Optional[bool] = None, detect_language: typing.Optional[bool] = None, diarize: typing.Optional[bool] = None, + diarize_model: typing.Optional[MediaTranscribeRequestDiarizeModel] = None, dictation: typing.Optional[bool] = None, encoding: typing.Optional[MediaTranscribeRequestEncoding] = None, filler_words: typing.Optional[bool] = None, @@ -381,6 +389,9 @@ def transcribe_file( diarize : typing.Optional[bool] Recognize speaker changes. Each word in the transcript will be assigned a speaker number starting at 0 + diarize_model : typing.Optional[MediaTranscribeRequestDiarizeModel] + Select and enable a specific batch diarization model version. If specifying this parameter, you should not set the deprecated `diarize=true` parameter. Not accepted on streaming requests. 
+ dictation : typing.Optional[bool] Dictation mode for controlling formatting with dictated speech @@ -478,6 +489,7 @@ def transcribe_file( detect_entities=detect_entities, detect_language=detect_language, diarize=diarize, + diarize_model=diarize_model, dictation=dictation, encoding=encoding, filler_words=filler_words, @@ -538,6 +550,7 @@ async def transcribe_url( detect_entities: typing.Optional[bool] = None, detect_language: typing.Optional[bool] = None, diarize: typing.Optional[bool] = None, + diarize_model: typing.Optional[MediaTranscribeRequestDiarizeModel] = None, dictation: typing.Optional[bool] = None, encoding: typing.Optional[MediaTranscribeRequestEncoding] = None, filler_words: typing.Optional[bool] = None, @@ -613,6 +626,9 @@ async def transcribe_url( diarize : typing.Optional[bool] Recognize speaker changes. Each word in the transcript will be assigned a speaker number starting at 0 + diarize_model : typing.Optional[MediaTranscribeRequestDiarizeModel] + Select and enable a specific batch diarization model version. If specifying this parameter, you should not set the deprecated `diarize=true` parameter. Not accepted on streaming requests. 
+ dictation : typing.Optional[bool] Dictation mode for controlling formatting with dictated speech @@ -712,6 +728,7 @@ async def main() -> None: detect_entities=True, detect_language=True, diarize=True, + diarize_model="latest", dictation=True, encoding="linear16", filler_words=True, @@ -756,6 +773,7 @@ async def main() -> None: detect_entities=detect_entities, detect_language=detect_language, diarize=diarize, + diarize_model=diarize_model, dictation=dictation, encoding=encoding, filler_words=filler_words, @@ -800,6 +818,7 @@ async def transcribe_file( detect_entities: typing.Optional[bool] = None, detect_language: typing.Optional[bool] = None, diarize: typing.Optional[bool] = None, + diarize_model: typing.Optional[MediaTranscribeRequestDiarizeModel] = None, dictation: typing.Optional[bool] = None, encoding: typing.Optional[MediaTranscribeRequestEncoding] = None, filler_words: typing.Optional[bool] = None, @@ -875,6 +894,9 @@ async def transcribe_file( diarize : typing.Optional[bool] Recognize speaker changes. Each word in the transcript will be assigned a speaker number starting at 0 + diarize_model : typing.Optional[MediaTranscribeRequestDiarizeModel] + Select and enable a specific batch diarization model version. If specifying this parameter, you should not set the deprecated `diarize=true` parameter. Not accepted on streaming requests. 
+ dictation : typing.Optional[bool] Dictation mode for controlling formatting with dictated speech @@ -980,6 +1002,7 @@ async def main() -> None: detect_entities=detect_entities, detect_language=detect_language, diarize=diarize, + diarize_model=diarize_model, dictation=dictation, encoding=encoding, filler_words=filler_words, diff --git a/src/deepgram/listen/v1/media/raw_client.py b/src/deepgram/listen/v1/media/raw_client.py index 68860bdc..9a89052a 100644 --- a/src/deepgram/listen/v1/media/raw_client.py +++ b/src/deepgram/listen/v1/media/raw_client.py @@ -13,6 +13,7 @@ from .types.media_transcribe_request_callback_method import MediaTranscribeRequestCallbackMethod from .types.media_transcribe_request_custom_intent_mode import MediaTranscribeRequestCustomIntentMode from .types.media_transcribe_request_custom_topic_mode import MediaTranscribeRequestCustomTopicMode +from .types.media_transcribe_request_diarize_model import MediaTranscribeRequestDiarizeModel from .types.media_transcribe_request_encoding import MediaTranscribeRequestEncoding from .types.media_transcribe_request_model import MediaTranscribeRequestModel from .types.media_transcribe_request_summarize import MediaTranscribeRequestSummarize @@ -47,6 +48,7 @@ def transcribe_url( detect_entities: typing.Optional[bool] = None, detect_language: typing.Optional[bool] = None, diarize: typing.Optional[bool] = None, + diarize_model: typing.Optional[MediaTranscribeRequestDiarizeModel] = None, dictation: typing.Optional[bool] = None, encoding: typing.Optional[MediaTranscribeRequestEncoding] = None, filler_words: typing.Optional[bool] = None, @@ -122,6 +124,9 @@ def transcribe_url( diarize : typing.Optional[bool] Recognize speaker changes. Each word in the transcript will be assigned a speaker number starting at 0 + diarize_model : typing.Optional[MediaTranscribeRequestDiarizeModel] + Select and enable a specific batch diarization model version. 
If specifying this parameter, you should not set the deprecated `diarize=true` parameter. Not accepted on streaming requests. + dictation : typing.Optional[bool] Dictation mode for controlling formatting with dictated speech @@ -213,6 +218,7 @@ def transcribe_url( "detect_entities": detect_entities, "detect_language": detect_language, "diarize": diarize, + "diarize_model": diarize_model, "dictation": dictation, "encoding": encoding, "filler_words": filler_words, @@ -293,6 +299,7 @@ def transcribe_file( detect_entities: typing.Optional[bool] = None, detect_language: typing.Optional[bool] = None, diarize: typing.Optional[bool] = None, + diarize_model: typing.Optional[MediaTranscribeRequestDiarizeModel] = None, dictation: typing.Optional[bool] = None, encoding: typing.Optional[MediaTranscribeRequestEncoding] = None, filler_words: typing.Optional[bool] = None, @@ -368,6 +375,9 @@ def transcribe_file( diarize : typing.Optional[bool] Recognize speaker changes. Each word in the transcript will be assigned a speaker number starting at 0 + diarize_model : typing.Optional[MediaTranscribeRequestDiarizeModel] + Select and enable a specific batch diarization model version. If specifying this parameter, you should not set the deprecated `diarize=true` parameter. Not accepted on streaming requests. 
+ dictation : typing.Optional[bool] Dictation mode for controlling formatting with dictated speech @@ -459,6 +469,7 @@ def transcribe_file( "detect_entities": detect_entities, "detect_language": detect_language, "diarize": diarize, + "diarize_model": diarize_model, "dictation": dictation, "encoding": encoding, "filler_words": filler_words, @@ -542,6 +553,7 @@ async def transcribe_url( detect_entities: typing.Optional[bool] = None, detect_language: typing.Optional[bool] = None, diarize: typing.Optional[bool] = None, + diarize_model: typing.Optional[MediaTranscribeRequestDiarizeModel] = None, dictation: typing.Optional[bool] = None, encoding: typing.Optional[MediaTranscribeRequestEncoding] = None, filler_words: typing.Optional[bool] = None, @@ -617,6 +629,9 @@ async def transcribe_url( diarize : typing.Optional[bool] Recognize speaker changes. Each word in the transcript will be assigned a speaker number starting at 0 + diarize_model : typing.Optional[MediaTranscribeRequestDiarizeModel] + Select and enable a specific batch diarization model version. If specifying this parameter, you should not set the deprecated `diarize=true` parameter. Not accepted on streaming requests. 
+ dictation : typing.Optional[bool] Dictation mode for controlling formatting with dictated speech @@ -708,6 +723,7 @@ async def transcribe_url( "detect_entities": detect_entities, "detect_language": detect_language, "diarize": diarize, + "diarize_model": diarize_model, "dictation": dictation, "encoding": encoding, "filler_words": filler_words, @@ -788,6 +804,7 @@ async def transcribe_file( detect_entities: typing.Optional[bool] = None, detect_language: typing.Optional[bool] = None, diarize: typing.Optional[bool] = None, + diarize_model: typing.Optional[MediaTranscribeRequestDiarizeModel] = None, dictation: typing.Optional[bool] = None, encoding: typing.Optional[MediaTranscribeRequestEncoding] = None, filler_words: typing.Optional[bool] = None, @@ -863,6 +880,9 @@ async def transcribe_file( diarize : typing.Optional[bool] Recognize speaker changes. Each word in the transcript will be assigned a speaker number starting at 0 + diarize_model : typing.Optional[MediaTranscribeRequestDiarizeModel] + Select and enable a specific batch diarization model version. If specifying this parameter, you should not set the deprecated `diarize=true` parameter. Not accepted on streaming requests. 
+ dictation : typing.Optional[bool] Dictation mode for controlling formatting with dictated speech @@ -954,6 +974,7 @@ async def transcribe_file( "detect_entities": detect_entities, "detect_language": detect_language, "diarize": diarize, + "diarize_model": diarize_model, "dictation": dictation, "encoding": encoding, "filler_words": filler_words, diff --git a/src/deepgram/listen/v1/media/types/__init__.py b/src/deepgram/listen/v1/media/types/__init__.py index d0ff0fb9..36acf609 100644 --- a/src/deepgram/listen/v1/media/types/__init__.py +++ b/src/deepgram/listen/v1/media/types/__init__.py @@ -9,6 +9,7 @@ from .media_transcribe_request_callback_method import MediaTranscribeRequestCallbackMethod from .media_transcribe_request_custom_intent_mode import MediaTranscribeRequestCustomIntentMode from .media_transcribe_request_custom_topic_mode import MediaTranscribeRequestCustomTopicMode + from .media_transcribe_request_diarize_model import MediaTranscribeRequestDiarizeModel from .media_transcribe_request_encoding import MediaTranscribeRequestEncoding from .media_transcribe_request_model import MediaTranscribeRequestModel from .media_transcribe_request_summarize import MediaTranscribeRequestSummarize @@ -18,6 +19,7 @@ "MediaTranscribeRequestCallbackMethod": ".media_transcribe_request_callback_method", "MediaTranscribeRequestCustomIntentMode": ".media_transcribe_request_custom_intent_mode", "MediaTranscribeRequestCustomTopicMode": ".media_transcribe_request_custom_topic_mode", + "MediaTranscribeRequestDiarizeModel": ".media_transcribe_request_diarize_model", "MediaTranscribeRequestEncoding": ".media_transcribe_request_encoding", "MediaTranscribeRequestModel": ".media_transcribe_request_model", "MediaTranscribeRequestSummarize": ".media_transcribe_request_summarize", @@ -51,6 +53,7 @@ def __dir__(): "MediaTranscribeRequestCallbackMethod", "MediaTranscribeRequestCustomIntentMode", "MediaTranscribeRequestCustomTopicMode", + "MediaTranscribeRequestDiarizeModel", 
"MediaTranscribeRequestEncoding", "MediaTranscribeRequestModel", "MediaTranscribeRequestSummarize", diff --git a/src/deepgram/listen/v1/media/types/media_transcribe_request_diarize_model.py b/src/deepgram/listen/v1/media/types/media_transcribe_request_diarize_model.py new file mode 100644 index 00000000..b99866b7 --- /dev/null +++ b/src/deepgram/listen/v1/media/types/media_transcribe_request_diarize_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +MediaTranscribeRequestDiarizeModel = typing.Union[typing.Literal["latest", "v1", "v2"], typing.Any] diff --git a/src/deepgram/requests/__init__.py b/src/deepgram/requests/__init__.py index 78cd5f29..c1380c47 100644 --- a/src/deepgram/requests/__init__.py +++ b/src/deepgram/requests/__init__.py @@ -39,6 +39,9 @@ ) from .create_project_invite_v1response import CreateProjectInviteV1ResponseParams from .deepgram import DeepgramParams + from .deepgram_listen_provider_v1 import DeepgramListenProviderV1Params + from .deepgram_listen_provider_v2 import DeepgramListenProviderV2Params + from .deepgram_listen_provider_v2language_hint import DeepgramListenProviderV2LanguageHintParams from .delete_project_invite_v1response import DeleteProjectInviteV1ResponseParams from .delete_project_key_v1response import DeleteProjectKeyV1ResponseParams from .delete_project_member_v1response import DeleteProjectMemberV1ResponseParams @@ -244,6 +247,9 @@ "CreateProjectDistributionCredentialsV1ResponseMemberParams": ".create_project_distribution_credentials_v1response_member", "CreateProjectDistributionCredentialsV1ResponseParams": ".create_project_distribution_credentials_v1response", "CreateProjectInviteV1ResponseParams": ".create_project_invite_v1response", + "DeepgramListenProviderV1Params": ".deepgram_listen_provider_v1", + "DeepgramListenProviderV2LanguageHintParams": ".deepgram_listen_provider_v2language_hint", + "DeepgramListenProviderV2Params": ".deepgram_listen_provider_v2", 
"DeepgramParams": ".deepgram", "DeleteProjectInviteV1ResponseParams": ".delete_project_invite_v1response", "DeleteProjectKeyV1ResponseParams": ".delete_project_key_v1response", @@ -436,6 +442,9 @@ def __dir__(): "CreateProjectDistributionCredentialsV1ResponseMemberParams", "CreateProjectDistributionCredentialsV1ResponseParams", "CreateProjectInviteV1ResponseParams", + "DeepgramListenProviderV1Params", + "DeepgramListenProviderV2LanguageHintParams", + "DeepgramListenProviderV2Params", "DeepgramParams", "DeleteProjectInviteV1ResponseParams", "DeleteProjectKeyV1ResponseParams", diff --git a/src/deepgram/requests/deepgram_listen_provider_v1.py b/src/deepgram/requests/deepgram_listen_provider_v1.py new file mode 100644 index 00000000..282b0d50 --- /dev/null +++ b/src/deepgram/requests/deepgram_listen_provider_v1.py @@ -0,0 +1,37 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions + + +class DeepgramListenProviderV1Params(typing_extensions.TypedDict): + type: typing.Literal["deepgram"] + """ + Provider type for speech-to-text + """ + + version: typing_extensions.NotRequired[typing.Literal["v1"]] + """ + Specifies usage of the V1 Deepgram speech-to-text API + """ + + model: typing_extensions.NotRequired[str] + """ + Model to use for speech to text using the V1 API (e.g. Nova-3, Nova-2) + """ + + language: typing_extensions.NotRequired[str] + """ + Language code to use for speech-to-text. Can be a BCP-47 language tag (e.g. 
`en`), or `multi` for code-switching transcription + """ + + keyterms: typing_extensions.NotRequired[typing.Sequence[str]] + """ + Prompt keyterm recognition to improve Keyword Recall Rate + """ + + smart_format: typing_extensions.NotRequired[bool] + """ + Applies smart formatting to improve transcript readability + """ diff --git a/src/deepgram/requests/deepgram_listen_provider_v2.py b/src/deepgram/requests/deepgram_listen_provider_v2.py new file mode 100644 index 00000000..be90ac69 --- /dev/null +++ b/src/deepgram/requests/deepgram_listen_provider_v2.py @@ -0,0 +1,33 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .deepgram_listen_provider_v2language_hint import DeepgramListenProviderV2LanguageHintParams + + +class DeepgramListenProviderV2Params(typing_extensions.TypedDict): + type: typing.Literal["deepgram"] + """ + Provider type for speech-to-text + """ + + version: typing_extensions.NotRequired[typing.Literal["v2"]] + """ + Specifies usage of the V2 Deepgram speech-to-text API (e.g. Flux) + """ + + model: str + """ + Model to use for speech to text using the V2 API (e.g. flux-general-en, flux-general-multi) + """ + + language_hint: typing_extensions.NotRequired[DeepgramListenProviderV2LanguageHintParams] + """ + One or more BCP-47 language codes to bias the model toward specific languages. Only supported when model is flux-general-multi. Without hints, the model auto-detects the spoken language. See the Language Prompting guide for details. 
+ """ + + keyterms: typing_extensions.NotRequired[typing.Sequence[str]] + """ + Prompt keyterm recognition to improve Keyword Recall Rate + """ diff --git a/src/deepgram/requests/deepgram_listen_provider_v2language_hint.py b/src/deepgram/requests/deepgram_listen_provider_v2language_hint.py new file mode 100644 index 00000000..b48d2a89 --- /dev/null +++ b/src/deepgram/requests/deepgram_listen_provider_v2language_hint.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DeepgramListenProviderV2LanguageHintParams = typing.Union[str, typing.Sequence[str]] diff --git a/src/deepgram/requests/groq.py b/src/deepgram/requests/groq.py index c2de055e..2f31525f 100644 --- a/src/deepgram/requests/groq.py +++ b/src/deepgram/requests/groq.py @@ -25,5 +25,5 @@ class GroqParams(typing_extensions.TypedDict): reasoning_mode: typing_extensions.NotRequired[GroqThinkProviderReasoningMode] """ - Groq reasoning mode + OpenAI reasoning_effort """ diff --git a/src/deepgram/types/__init__.py b/src/deepgram/types/__init__.py index 611438fd..34ae8481 100644 --- a/src/deepgram/types/__init__.py +++ b/src/deepgram/types/__init__.py @@ -49,6 +49,9 @@ ) from .create_project_invite_v1response import CreateProjectInviteV1Response from .deepgram import Deepgram + from .deepgram_listen_provider_v1 import DeepgramListenProviderV1 + from .deepgram_listen_provider_v2 import DeepgramListenProviderV2 + from .deepgram_listen_provider_v2language_hint import DeepgramListenProviderV2LanguageHint from .deepgram_speak_provider_model import DeepgramSpeakProviderModel from .delete_agent_configuration_v1response import DeleteAgentConfigurationV1Response from .delete_agent_variable_v1response import DeleteAgentVariableV1Response @@ -316,6 +319,9 @@ "CreateProjectDistributionCredentialsV1ResponseMember": ".create_project_distribution_credentials_v1response_member", "CreateProjectInviteV1Response": ".create_project_invite_v1response", "Deepgram": ".deepgram", + 
"DeepgramListenProviderV1": ".deepgram_listen_provider_v1", + "DeepgramListenProviderV2": ".deepgram_listen_provider_v2", + "DeepgramListenProviderV2LanguageHint": ".deepgram_listen_provider_v2language_hint", "DeepgramSpeakProviderModel": ".deepgram_speak_provider_model", "DeleteAgentConfigurationV1Response": ".delete_agent_configuration_v1response", "DeleteAgentVariableV1Response": ".delete_agent_variable_v1response", @@ -573,6 +579,9 @@ def __dir__(): "CreateProjectDistributionCredentialsV1ResponseMember", "CreateProjectInviteV1Response", "Deepgram", + "DeepgramListenProviderV1", + "DeepgramListenProviderV2", + "DeepgramListenProviderV2LanguageHint", "DeepgramSpeakProviderModel", "DeleteAgentConfigurationV1Response", "DeleteAgentVariableV1Response", diff --git a/src/deepgram/types/deepgram_listen_provider_v1.py b/src/deepgram/types/deepgram_listen_provider_v1.py new file mode 100644 index 00000000..7bc85007 --- /dev/null +++ b/src/deepgram/types/deepgram_listen_provider_v1.py @@ -0,0 +1,48 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class DeepgramListenProviderV1(UncheckedBaseModel): + type: typing.Literal["deepgram"] = pydantic.Field(default="deepgram") + """ + Provider type for speech-to-text + """ + + version: typing.Optional[typing.Literal["v1"]] = pydantic.Field(default=None) + """ + Specifies usage of the V1 Deepgram speech-to-text API + """ + + model: typing.Optional[str] = pydantic.Field(default=None) + """ + Model to use for speech to text using the V1 API (e.g. Nova-3, Nova-2) + """ + + language: typing.Optional[str] = pydantic.Field(default=None) + """ + Language code to use for speech-to-text. Can be a BCP-47 language tag (e.g. 
`en`), or `multi` for code-switching transcription + """ + + keyterms: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + Prompt keyterm recognition to improve Keyword Recall Rate + """ + + smart_format: typing.Optional[bool] = pydantic.Field(default=None) + """ + Applies smart formatting to improve transcript readability + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/deepgram/types/deepgram_listen_provider_v2.py b/src/deepgram/types/deepgram_listen_provider_v2.py new file mode 100644 index 00000000..83cf6f7e --- /dev/null +++ b/src/deepgram/types/deepgram_listen_provider_v2.py @@ -0,0 +1,44 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .deepgram_listen_provider_v2language_hint import DeepgramListenProviderV2LanguageHint + + +class DeepgramListenProviderV2(UncheckedBaseModel): + type: typing.Literal["deepgram"] = pydantic.Field(default="deepgram") + """ + Provider type for speech-to-text + """ + + version: typing.Optional[typing.Literal["v2"]] = pydantic.Field(default=None) + """ + Specifies usage of the V2 Deepgram speech-to-text API (e.g. Flux) + """ + + model: str = pydantic.Field() + """ + Model to use for speech to text using the V2 API (e.g. flux-general-en, flux-general-multi) + """ + + language_hint: typing.Optional[DeepgramListenProviderV2LanguageHint] = pydantic.Field(default=None) + """ + One or more BCP-47 language codes to bias the model toward specific languages. Only supported when model is flux-general-multi. Without hints, the model auto-detects the spoken language. See the Language Prompting guide for details. 
+ """ + + keyterms: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + Prompt keyterm recognition to improve Keyword Recall Rate + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/deepgram/types/deepgram_listen_provider_v2language_hint.py b/src/deepgram/types/deepgram_listen_provider_v2language_hint.py new file mode 100644 index 00000000..184371b6 --- /dev/null +++ b/src/deepgram/types/deepgram_listen_provider_v2language_hint.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DeepgramListenProviderV2LanguageHint = typing.Union[str, typing.List[str]] diff --git a/src/deepgram/types/groq.py b/src/deepgram/types/groq.py index a3962e43..fa0350e7 100644 --- a/src/deepgram/types/groq.py +++ b/src/deepgram/types/groq.py @@ -27,7 +27,7 @@ class Groq(UncheckedBaseModel): reasoning_mode: typing.Optional[GroqThinkProviderReasoningMode] = pydantic.Field(default=None) """ - Groq reasoning mode + OpenAI reasoning_effort """ if IS_PYDANTIC_V2: diff --git a/tests/custom/test_compat_aliases.py b/tests/custom/test_compat_aliases.py index 06e4bae5..ef094b6b 100644 --- a/tests/custom/test_compat_aliases.py +++ b/tests/custom/test_compat_aliases.py @@ -303,3 +303,45 @@ def test_agent_settings_audio_output_container_still_accepts_arbitrary_strings() output = AgentV1SettingsAudioOutput(container="webm") assert output.container == "webm" + + +def test_listen_provider_type_aliases_resolve_to_deepgram_top_level() -> None: + from deepgram.agent.v1.types import ( + AgentV1SettingsAgentContextListenProviderV1, + AgentV1SettingsAgentContextListenProviderV2, + AgentV1SettingsAgentContextListenProviderV2LanguageHint, + AgentV1SettingsAgentListenProviderV1, + AgentV1SettingsAgentListenProviderV2, + ) + from 
deepgram.types import ( + DeepgramListenProviderV1, + DeepgramListenProviderV2, + DeepgramListenProviderV2LanguageHint, + ) + + assert AgentV1SettingsAgentContextListenProviderV1 is DeepgramListenProviderV1 + assert AgentV1SettingsAgentContextListenProviderV2 is DeepgramListenProviderV2 + assert AgentV1SettingsAgentContextListenProviderV2LanguageHint is DeepgramListenProviderV2LanguageHint + assert AgentV1SettingsAgentListenProviderV1 is DeepgramListenProviderV1 + assert AgentV1SettingsAgentListenProviderV2 is DeepgramListenProviderV2 + + +def test_listen_provider_request_aliases_resolve_to_deepgram_top_level() -> None: + from deepgram.agent.v1.requests import ( + AgentV1SettingsAgentContextListenProviderV1Params, + AgentV1SettingsAgentContextListenProviderV2LanguageHintParams, + AgentV1SettingsAgentContextListenProviderV2Params, + AgentV1SettingsAgentListenProviderV1Params, + AgentV1SettingsAgentListenProviderV2Params, + ) + from deepgram.requests import ( + DeepgramListenProviderV1Params, + DeepgramListenProviderV2LanguageHintParams, + DeepgramListenProviderV2Params, + ) + + assert AgentV1SettingsAgentContextListenProviderV1Params is DeepgramListenProviderV1Params + assert AgentV1SettingsAgentContextListenProviderV2Params is DeepgramListenProviderV2Params + assert AgentV1SettingsAgentContextListenProviderV2LanguageHintParams is DeepgramListenProviderV2LanguageHintParams + assert AgentV1SettingsAgentListenProviderV1Params is DeepgramListenProviderV1Params + assert AgentV1SettingsAgentListenProviderV2Params is DeepgramListenProviderV2Params diff --git a/tests/wire/conftest.py b/tests/wire/conftest.py index dcbbbfb1..2d832062 100644 --- a/tests/wire/conftest.py +++ b/tests/wire/conftest.py @@ -45,13 +45,13 @@ def get_client(test_id: str) -> DeepgramClient: if _CLIENT_SUPPORTS_HEADERS: return DeepgramClient( - environment=DeepgramClientEnvironment(base=base_url, agent=base_url, production=base_url), + environment=DeepgramClientEnvironment(base=base_url, 
production=base_url, agent=base_url), headers=test_headers, api_key="test_api_key", ) return DeepgramClient( - environment=DeepgramClientEnvironment(base=base_url, agent=base_url, production=base_url), + environment=DeepgramClientEnvironment(base=base_url, production=base_url, agent=base_url), httpx_client=httpx.Client(headers=test_headers), api_key="test_api_key", ) diff --git a/tests/wire/test_listen_v1_media.py b/tests/wire/test_listen_v1_media.py index af68c86c..6150ace0 100644 --- a/tests/wire/test_listen_v1_media.py +++ b/tests/wire/test_listen_v1_media.py @@ -21,6 +21,7 @@ def test_listen_v1_media_transcribe_url() -> None: detect_entities=True, detect_language=True, diarize=True, + diarize_model="latest", dictation=True, encoding="linear16", filler_words=True, @@ -64,6 +65,7 @@ def test_listen_v1_media_transcribe_url() -> None: "detect_entities": "true", "detect_language": "true", "diarize": "true", + "diarize_model": "latest", "dictation": "true", "encoding": "linear16", "filler_words": "true", diff --git a/tests/wire/test_manage_v1_projects_keys.py b/tests/wire/test_manage_v1_projects_keys.py index de2ded55..cf232369 100644 --- a/tests/wire/test_manage_v1_projects_keys.py +++ b/tests/wire/test_manage_v1_projects_keys.py @@ -1,7 +1,7 @@ -from deepgram.requests import CreateKeyV1RequestOneParams - from .conftest import get_client, verify_request_count +from deepgram.requests import CreateKeyV1RequestOneParams + def test_manage_v1_projects_keys_list_() -> None: """Test list endpoint with WireMock""" diff --git a/wiremock/wiremock-mappings.json b/wiremock/wiremock-mappings.json index d6a735c2..4b3fec7e 100644 --- a/wiremock/wiremock-mappings.json +++ b/wiremock/wiremock-mappings.json @@ -1,20 +1,20 @@ { "mappings": [ { - "id": "533b5d52-ab21-4763-aaae-87cf52f49aa5", - "name": "List Agent Think Models - default", + "id": "0fe59c56-e953-447e-9569-95754dccc9dd", + "name": "List Agent Think Models - List supported models", "request": { "urlPathTemplate": 
"/v1/agent/settings/think/models", "method": "GET" }, "response": { "status": 200, - "body": "{\n \"models\": [\n {\n \"id\": \"gpt-5\",\n \"name\": \"name\",\n \"provider\": \"open_ai\"\n }\n ]\n}", + "body": "{\n \"models\": [\n {\n \"id\": \"gpt-5\",\n \"name\": \"GPT-5\",\n \"provider\": \"open_ai\"\n }\n ]\n}", "headers": { "Content-Type": "application/json" } }, - "uuid": "533b5d52-ab21-4763-aaae-87cf52f49aa5", + "uuid": "0fe59c56-e953-447e-9569-95754dccc9dd", "persistent": true, "priority": 3, "metadata": { @@ -115,6 +115,9 @@ "diarize": { "equalTo": "true" }, + "diarize_model": { + "equalTo": "latest" + }, "dictation": { "equalTo": "true" },