{"object":"list","data":[{"id":"openai/gpt-4o","type":"chat-completion","info":{"name":"GPT 4o","developer":"Open AI","description":"Multimodal AI model by OpenAI enhancing human-computer interaction.","contextLength":128000,"maxTokens":16384,"url":"https://aimlapi.com/models/chat-gpt-4-omni","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-4o"},"features":["openai/chat-completion","openai/response-api","openai/chat-assistant","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.temperature","openai/chat-completion.top-p"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"gpt-4o-2024-08-06","type":"chat-completion","info":{"name":"GPT 4o 2024-08-06","developer":"Open AI","description":"Multimodal AI model by OpenAI enhancing human-computer interaction.","contextLength":128000,"maxTokens":16384,"url":"https://aimlapi.com/models/gpt-4o-2024-08-06-api","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-4o"},"features":["openai/chat-completion","openai/response-api","openai/chat-assistant","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.temperature","openai/chat-completion.top-p"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"gpt-4o-2024-05-13","type":"chat-completion","info":{"name":"GPT 4o 2024-05-13","developer":"Open AI","description":"Multimodal AI model by OpenAI enhancing human-computer interaction.","contextLength":128000,"maxTokens":4096,"url":"https://aimlapi.com/models/gpt-4o-2024-05-13-api","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-4o"},"features":["openai/chat-completion","openai/response-api","openai/chat-assistant","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.temperature","openai/chat-completion.top-p"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"gpt-4o-mini","type":"chat-completion","info":{"name":"GPT 4o mini","developer":"Open AI","description":"GPT-4o Mini: Cost-efficient, advanced model for diverse AI applications.","contextLength":128000,"maxTokens":16384,"url":"https://aimlapi.com/models/chat-gpt-4o-mini","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-4o-mini"},"features":["openai/chat-completion","openai/response-api","openai/chat-assistant","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.temperature","openai/chat-completion.top-p"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"gpt-4o-mini-2024-07-18","type":"chat-completion","info":{"name":"GPT 4o mini 2024-07-18","developer":"Open AI","description":"GPT-4o Mini: Cost-efficient, advanced model for diverse AI applications.","contextLength":128000,"maxTokens":16384,"url":"https://aimlapi.com/models/chat-gpt-4o-mini","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-4o-mini"},"features":["openai/chat-completion","openai/response-api","openai/chat-assistant","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.temperature","openai/chat-completion.top-p"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"gpt-4-turbo","type":"chat-completion","info":{"name":"GPT 4 turbo","developer":"Open AI","description":"High-speed AI model for instant language processing. API for ChatGPT 4 Turbo.","contextLength":128000,"maxTokens":4096,"url":"https://aimlapi.com/models/chat-gpt-4-turbo","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-4-turbo"},"features":["openai/chat-completion","openai/response-api","openai/chat-assistant","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.temperature","openai/chat-completion.top-p"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"gpt-4-turbo-2024-04-09","type":"chat-completion","info":{"name":"GPT 4 turbo 2024-04-09","developer":"Open AI","description":"High-speed AI model for instant language processing. API for ChatGPT 4 Turbo.","contextLength":128000,"maxTokens":4096,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-4-turbo"},"features":["openai/chat-completion","openai/response-api","openai/chat-assistant","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.temperature","openai/chat-completion.top-p"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"gpt-4","type":"chat-completion","info":{"name":"GPT 4","developer":"Open AI","description":"Revolutionary AI model for unparalleled natural language interaction. API for ChatGPT 4.","contextLength":8000,"maxTokens":8192,"url":"https://aimlapi.com/models/chat-gpt-4","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-4"},"features":["openai/chat-completion","openai/response-api","openai/chat-assistant","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.temperature","openai/chat-completion.top-p"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"gpt-4-0125-preview","type":"chat-completion","info":{"name":"GPT 4 0125 preview","developer":"Open AI","description":"Older preview release of GPT-4 with extended context and improved instruction following over the base GPT-4.","contextLength":8000,"maxTokens":4096,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-4"},"features":["openai/chat-completion","openai/response-api","openai/chat-assistant","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"gpt-4-1106-preview","type":"chat-completion","info":{"name":"GPT 4 1106 preview","developer":"Open AI","description":"Older preview release of GPT-4 with extended context and improved instruction following over the base GPT-4.","contextLength":8000,"maxTokens":4096,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-4"},"features":["openai/chat-completion","openai/response-api","openai/chat-assistant","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"gpt-3.5-turbo","type":"chat-completion","info":{"name":"GPT 3.5 turbo","developer":"Open AI","description":"Fast and cost-efficient OpenAI model for a wide range of text generation and conversation tasks.","contextLength":16000,"maxTokens":4096,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-3.5-turbo"},"features":["openai/chat-completion","openai/response-api","openai/chat-assistant","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"gpt-3.5-turbo-0125","type":"chat-completion","info":{"name":"GPT 3.5 turbo 0125","developer":"Open AI","description":"The newest GPT-3.5 Turbo with improved accuracy and encoding.","contextLength":16000,"maxTokens":4096,"url":"https://aimlapi.com/models/chat-gpt-3-5-turbo-0125","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-3.5-turbo"},"features":["openai/chat-completion","openai/response-api","openai/chat-assistant","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"gpt-3.5-turbo-1106","type":"chat-completion","info":{"name":"GPT 3.5 turbo 1106","developer":"Open AI","description":"The GPT-3.5 Turbo 1106 model features enhanced instruction adherence and a JSON mode","contextLength":16000,"maxTokens":4096,"url":"https://aimlapi.com/models/chat-gpt-3-5-turbo-1106","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-3.5-turbo"},"features":["openai/chat-completion","openai/response-api","openai/chat-assistant","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"o3-mini","type":"chat-completion","info":{"name":"o3 mini","developer":"Open AI","description":"OpenAI o3-mini excels in reasoning tasks with advanced features like deliberative alignment and extensive context support.","contextLength":200000,"maxTokens":100000,"url":"https://aimlapi.com/models/openai-o3-mini-api","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/o3-mini"},"features":["openai/chat-completion","openai/response-api","openai/chat-assistant","openai/chat-completion.function","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.seed","openai/chat-completion.reasoning","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"gpt-4o-audio-preview","type":"chat-completion","info":{"name":"Chat GPT 4o audio preview","developer":"Open AI","description":"GPT-4o Audio Preview is OpenAI's latest flagship model capable of understanding and generating text and audio in real-time, designed for natural conversation and auditory tasks.","contextLength":128000,"maxTokens":16384,"url":"https://aimlapi.com/models/gpt-4o-audio-preview-api","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-4o-audio-preview"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.audio"],"endpoints":["/v1/chat/completions"]},{"id":"gpt-4o-mini-audio-preview","type":"chat-completion","info":{"name":"Chat GPT 4o mini audio preview","developer":"Open AI","description":"GPT-4o Mini Audio adds speech-to-text and text-to-speech abilities to the efficient GPT-4o Mini model, optimized for voice interfaces in smaller applications.","contextLength":128000,"maxTokens":16384,"url":"https://aimlapi.com/models/gpt-4o-mini-audio-api","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-4o-mini-audio-preview"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.audio"],"endpoints":["/v1/chat/completions"]},{"id":"openai/gpt-audio","type":"chat-completion","info":{"name":"Chat GPT audio","developer":"Open AI","description":"GPT Audio is OpenAI's latest flagship model capable of understanding and generating text and audio in real-time, designed for natural conversation and auditory tasks.","contextLength":128000,"maxTokens":16384,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-audio"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.audio"],"endpoints":["/v1/chat/completions"]},{"id":"openai/gpt-audio-mini","type":"chat-completion","info":{"name":"Chat GPT mini audio","developer":"Open AI","description":"GPT Mini Audio adds speech-to-text and text-to-speech abilities to the efficient GPT Mini model, optimized for voice interfaces in smaller applications.","contextLength":128000,"maxTokens":16384,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-audio-mini"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.audio"],"endpoints":["/v1/chat/completions"]},{"id":"gpt-4o-search-preview","type":"chat-completion","info":{"name":"Chat GPT 4o search preview","developer":"Open AI","description":"GPT-4o Search Preview blends OpenAI's GPT-4o capabilities with live web search results, allowing users to get up-to-date answers grounded in real-time data.","contextLength":128000,"maxTokens":16384,"url":"https://aimlapi.com/models/gpt-4o-search-preview-api","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-4o-search-preview"},"features":["openai/chat-completion","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.search"],"endpoints":["/v1/chat/completions"]},{"id":"gpt-4o-mini-search-preview","type":"chat-completion","info":{"name":"Chat GPT 4o mini search preview","developer":"Open AI","description":"GPT-4o Mini Search Preview is a more efficient variant of GPT-4o that includes search capabilities while being optimized for speed and affordability.","contextLength":128000,"maxTokens":16384,"url":"https://aimlapi.com/models/gpt-4o-mini-search-preview-api","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-4o-mini-search-preview"},"features":["openai/chat-completion","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.search"],"endpoints":["/v1/chat/completions"]},{"id":"openai/gpt-4.1-2025-04-14","type":"chat-completion","info":{"name":"Chat GPT 4.1","developer":"Open AI","description":"","contextLength":1000000,"maxTokens":32768,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-4.1"},"features":["openai/chat-completion","openai/response-api","openai/chat-assistant","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.temperature","openai/chat-completion.top-p"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"openai/gpt-4.1-mini-2025-04-14","type":"chat-completion","info":{"name":"Chat GPT 4.1 mini","developer":"Open AI","description":"","contextLength":1000000,"maxTokens":32768,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-4.1-mini"},"features":["openai/chat-completion","openai/response-api","openai/chat-assistant","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.temperature","openai/chat-completion.top-p"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"openai/gpt-4.1-nano-2025-04-14","type":"chat-completion","info":{"name":"Chat GPT 4.1 nano","developer":"Open AI","description":"","contextLength":1000000,"maxTokens":32768,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-4.1-nano"},"features":["openai/chat-completion","openai/response-api","openai/chat-assistant","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.temperature","openai/chat-completion.top-p"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"openai/o4-mini-2025-04-16","type":"chat-completion","info":{"name":"o4-mini","developer":"Open AI","description":"","contextLength":200000,"maxTokens":100000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/o4-mini"},"features":["openai/chat-completion","openai/response-api","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.message.refusal","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.seed","openai/chat-completion.reasoning","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"openai/o3-2025-04-16","type":"chat-completion","info":{"name":"o3","developer":"Open AI","description":"","contextLength":200000,"maxTokens":100000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/o3"},"features":["openai/chat-completion","openai/response-api","openai/chat-completion.function","openai/chat-completion.vision","openai/chat-completion.message.refusal","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.message.developer","openai/chat-completion.number-of-messages","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.seed","openai/chat-completion.reasoning","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"o1","type":"chat-completion","info":{"name":"o1","developer":"Open AI","description":"OpenAI o1 excels in complex reasoning tasks with advanced features like chain-of-thought processing and extensive context support.","contextLength":200000,"maxTokens":100000,"url":"https://aimlapi.com/models/openai-o1-api","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/o1"},"features":["openai/chat-completion","openai/response-api","openai/chat-assistant","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.message.refusal","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.seed","openai/chat-completion.reasoning","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"openai/gpt-5-2025-08-07","type":"chat-completion","info":{"name":"GPT-5","developer":"Open AI","description":"","contextLength":400000,"maxTokens":128000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-5"},"features":["openai/chat-completion","openai/response-api","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.max-completion-tokens","openai/chat-completion.stream","openai/chat-completion.number-of-messages","openai/chat-completion.seed","openai/chat-completion.reasoning","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"openai/gpt-5-mini-2025-08-07","type":"chat-completion","info":{"name":"GPT-5 mini","developer":"Open AI","description":"","contextLength":400000,"maxTokens":128000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-5-mini"},"features":["openai/chat-completion","openai/response-api","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.max-completion-tokens","openai/chat-completion.stream","openai/chat-completion.number-of-messages","openai/chat-completion.seed","openai/chat-completion.reasoning","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"openai/gpt-5-nano-2025-08-07","type":"chat-completion","info":{"name":"GPT-5 nano","developer":"Open AI","description":"","contextLength":400000,"maxTokens":128000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-5-nano"},"features":["openai/chat-completion","openai/response-api","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.seed","openai/chat-completion.reasoning","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"openai/gpt-5-chat-latest","type":"chat-completion","info":{"name":"GPT-5 Chat","developer":"Open AI","description":"","contextLength":400000,"maxTokens":16384,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-5-chat"},"features":["openai/chat-completion","openai/response-api","openai/chat-completion.vision","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"openai/gpt-5-1","type":"chat-completion","info":{"name":"GPT-5.1","developer":"Open AI","description":"","contextLength":400000,"maxTokens":128000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-5.1"},"features":["openai/chat-completion","openai/response-api","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.message.refusal","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.number-of-messages","openai/chat-completion.logprobs","openai/chat-completion.seed","openai/chat-completion.reasoning","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"openai/gpt-5-1-chat-latest","type":"chat-completion","info":{"name":"GPT-5.1 Chat Latest","developer":"Open AI","description":"","contextLength":400000,"maxTokens":128000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-5.1"},"features":["openai/chat-completion","openai/response-api","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.message.refusal","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.number-of-messages","openai/chat-completion.seed","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"openai/gpt-5-2","type":"chat-completion","info":{"name":"GPT-5.2","developer":"Open AI","description":"","contextLength":400000,"maxTokens":128000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-5.2"},"features":["openai/chat-completion","openai/response-api","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.message.refusal","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.number-of-messages","openai/chat-completion.logprobs","openai/chat-completion.seed","openai/chat-completion.reasoning","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"openai/gpt-5-2-chat-latest","type":"chat-completion","info":{"name":"GPT-5.2 Chat Latest","developer":"Open AI","description":"","contextLength":128000,"maxTokens":16000,"url":"","docs_url":""},"features":["openai/chat-completion","openai/response-api","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.message.refusal","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.number-of-messages","openai/chat-completion.seed","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"openai/o3-pro","type":"responses","info":{"name":"o3-pro","developer":"Open AI","description":"","contextLength":200000,"maxTokens":100000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/o3-pro"},"features":["openai/response-api","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.seed","openai/chat-completion.reasoning","openai/chat-completion.response-format"],"endpoints":["/v1/responses"]},{"id":"openai/gpt-5-pro","type":"responses","info":{"name":"GPT 5 Pro","developer":"Open AI","description":"","contextLength":400000,"maxTokens":272000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-5"},"features":["openai/response-api"],"endpoints":["/v1/responses"]},{"id":"openai/gpt-5-1-codex","type":"responses","info":{"name":"GPT-5.1 Codex","developer":"Open AI","description":"","contextLength":400000,"maxTokens":128000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-5.1"},"features":["openai/response-api"],"endpoints":["/v1/responses"]},{"id":"openai/gpt-5-1-codex-mini","type":"responses","info":{"name":"GPT-5.1 Codex Mini","developer":"Open AI","description":"","contextLength":400000,"maxTokens":128000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-5.1"},"features":["openai/response-api"],"endpoints":["/v1/responses"]},{"id":"openai/gpt-5-2-codex","type":"responses","info":{"name":"GPT-5.2 Codex","developer":"Open AI","description":"","contextLength":400000,"maxTokens":128000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-5.2"},"features":["openai/response-api"],"endpoints":["/v1/responses"]},{"id":"openai/gpt-5-3-codex","type":"responses","info":{"name":"GPT-5.3 Codex","developer":"Open AI","description":"","contextLength":400000,"maxTokens":128000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-5.3-codex"},"features":["openai/response-api"],"endpoints":["/v1/responses"]},{"id":"openai/gpt-5-2-pro","type":"responses","info":{"name":"GPT-5.2 Pro","developer":"Open AI","description":"","contextLength":400000,"maxTokens":128000,"url":"","docs_url":""},"features":["openai/response-api"],"endpoints":["/v1/chat/completions","/v1/responses"]},{"id":"mistralai/Mixtral-8x7B-Instruct-v0.1","type":"chat-completion","info":{"name":"Mixtral 8x7B Instruct v0.1","developer":"Mistral AI","description":"Harness the power of tailored AI with Mixtral-8x7B Instruct v0.1 API","contextLength":64000,"maxTokens":32768,"url":"https://aimlapi.com/models/mixtral-8x7b-instruct-v01","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/mistral-ai/mixtral-8x7b-instruct-v0.1"},"features":["openai/chat-completion.max-completion-tokens","openai/chat-completion","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty"],"endpoints":["/v1/chat/completions"]},{"id":"meta-llama/Llama-3.3-70B-Instruct-Turbo","type":"chat-completion","info":{"name":"Llama 3.3 70B Instruct Turbo","developer":"Meta","description":"Meta Llama 3.3 70B Instruct Turbo is an advanced language model optimized for instruction-following tasks with high efficiency and performance.","contextLength":128000,"maxTokens":127000,"url":"https://aimlapi.com/models/meta-llama-3-3-70b-instruct-turbo-api","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/meta/llama-3.3-70b-instruct-turbo"},"features":["openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.developer","together/chat-completion.echo","openai/chat-completion","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty"],"endpoints":["/v1/chat/completions"]},{"id":"Qwen/Qwen2.5-7B-Instruct-Turbo","type":"chat-completion","info":{"name":"Qwen2.5 7B Instruct Turbo","developer":"Alibaba Cloud","description":"Qwen 2.5 7B Instruct Turbo excels in coding and instruction following.","contextLength":32000,"maxTokens":31000,"url":"https://aimlapi.com/models/qwen-2-5-7b-instruct-api","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/alibaba-cloud/qwen2.5-7b-instruct-turbo"},"features":["openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.developer","openai/chat-completion.response-format","together/chat-completion.echo","openai/chat-completion","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty"],"endpoints":["/v1/chat/completions"]},{"id":"meta-llama/Meta-Llama-3-8B-Instruct-Lite","type":"chat-completion","info":{"name":"Meta Llama 3 8B Instruct Lite","developer":"Meta","description":"Llama 3 8B Instruct Lite: Advanced, fast and cheapest one text generation model optimized for dialogue, emphasizing safety and helpfulness","contextLength":9000,"maxTokens":8000,"url":"https://aimlapi.com/models/llama-3-8b-instruct-lite-api","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/meta/meta-llama-3-8b-instruct-lite"},"features":["openai/chat-completion.message.developer","together/chat-completion.echo","openai/chat-completion","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.number-of-messages","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty"],"endpoints":["/v1/chat/completions"]},{"id":"claude-3-haiku-20240307","type":"chat-completion","info":{"name":"Claude 3 Haiku 2024-03-07","developer":"Anthropic","description":"Cost-efficient, high-speed AI for entry-level and real-time applications.","contextLength":200000,"maxTokens":4096,"url":"https://aimlapi.com/models/claude-3-haiku","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/anthropic/claude-3.5-haiku"},"features":["anthropic/message-completion","anthropic/message-completion.vision","anthropic/message-completion.function"],"endpoints":["/v1/chat/completions","/v1/messages"]},{"id":"claude-sonnet-4-20250514","type":"chat-completion","info":{"name":"Claude 4 Sonnet","developer":"Anthropic","description":"Claude 4 Sonnet is Anthropic's balanced model combining strong reasoning and coding capabilities with practical efficiency.","contextLength":200000,"maxTokens":64000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/anthropic/claude-4-sonnet"},"features":["anthropic/message-completion","anthropic/message-completion.vision","anthropic/message-completion.function","anthropic/message-completion.reasoning"],"endpoints":["/v1/chat/completions","/v1/messages"]},{"id":"claude-opus-4-20250514","type":"chat-completion","info":{"name":"Claude 4 Opus","developer":"Anthropic","description":"Claude 4 Opus is Anthropic's most capable model, designed for complex reasoning, analysis, and long-form content generation.","contextLength":200000,"maxTokens":32000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/anthropic/claude-4-opus"},"features":["anthropic/message-completion","anthropic/message-completion.vision","anthropic/message-completion.function","anthropic/message-completion.reasoning"],"endpoints":["/v1/chat/completions","/v1/messages"]},{"id":"claude-opus-4-1-20250805","type":"chat-completion","info":{"name":"Claude 4.1 Opus","developer":"Anthropic","description":"Claude 4.1 Opus is Anthropic's most capable model, designed for complex reasoning, analysis, and long-form content generation.","contextLength":200000,"maxTokens":32000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/anthropic/claude-opus-4.1"},"features":["anthropic/message-completion","anthropic/message-completion.vision","anthropic/message-completion.function","anthropic/message-completion.reasoning"],"endpoints":["/v1/chat/completions","/v1/messages"]},{"id":"claude-sonnet-4-5-20250929","type":"chat-completion","info":{"name":"Claude 4.5 Sonnet","developer":"Anthropic","description":"Claude 4.5 Sonnet is Anthropic's balanced model combining strong reasoning and coding capabilities with practical efficiency.","contextLength":200000,"maxTokens":64000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/anthropic/claude-4-5-sonnet"},"features":["anthropic/message-completion","anthropic/message-completion.vision","anthropic/message-completion.function","anthropic/message-completion.reasoning"],"endpoints":["/v1/chat/completions","/v1/messages"]},{"id":"claude-haiku-4-5-20251001","type":"chat-completion","info":{"name":"Claude 4.5 Haiku","developer":"Anthropic","description":"Claude 4.5 Haiku is a high-speed language model optimized for diverse applications.","contextLength":200000,"maxTokens":64000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/anthropic/claude-4.5-haiku"},"features":["anthropic/message-completion","anthropic/message-completion.vision","anthropic/message-completion.function","anthropic/message-completion.reasoning"],"endpoints":["/v1/chat/completions","/v1/messages"]},{"id":"claude-opus-4-5-20251101","type":"chat-completion","info":{"name":"Claude 4.5 Opus","developer":"Anthropic","description":"Claude 4.5 Opus is Anthropic's most capable model, designed for complex reasoning, analysis, and long-form content generation.","contextLength":200000,"maxTokens":64000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/anthropic/claude-4.5-opus"},"features":["anthropic/message-completion","anthropic/message-completion.vision","anthropic/message-completion.function","anthropic/message-completion.reasoning"],"endpoints":["/v1/chat/completions","/v1/messages"]},{"id":"claude-opus-4-6","type":"chat-completion","info":{"name":"Claude 4.6 Opus","developer":"Anthropic","description":"Claude 4.6 Opus is Anthropic's most capable model, designed for complex reasoning, analysis, and long-form content generation.","contextLength":200000,"maxTokens":128000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/anthropic/claude-4.6-opus"},"features":["anthropic/message-completion","anthropic/message-completion.vision","anthropic/message-completion.function","anthropic/message-completion.reasoning"],"endpoints":["/v1/chat/completions","/v1/messages"]},{"id":"claude-sonnet-4-6","type":"chat-completion","info":{"name":"Claude 4.6 Sonnet","developer":"Anthropic","description":"Claude 4.6 Sonnet is Anthropic's balanced model combining strong reasoning and coding capabilities with practical efficiency.","contextLength":200000,"maxTokens":64000,"url":"","docs_url":""},"features":["anthropic/message-completion","anthropic/message-completion.vision","anthropic/message-completion.function","anthropic/message-completion.reasoning"],"endpoints":["/v1/chat/completions","/v1/messages"]},{"id":"google/gemini-2.0-flash","type":"chat-completion","info":{"name":"Gemini 2.0 Flash","developer":"Google","description":"Gemini 2.0 Flash: Next-gen model with superior speed and multimodal performance.","contextLength":1000000,"maxTokens":8192,"url":"https://aimlapi.com/models/gemini-2-0-flash-api","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/google/gemini-2.0-flash"},"features":["openai/chat-completion","openai/chat-completion.vision","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.max-completion-tokens","openai/chat-completion.stream","openai/chat-completion.number-of-messages","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal"],"endpoints":["/v1/chat/completions"]},{"id":"google/gemini-2.5-pro","type":"chat-completion","info":{"name":"Gemini 2.5 Pro","developer":"Google","description":"Gemini Pro 2.5 is Google's most advanced reasoning AI model designed for complex tasks, showcasing strong reasoning and coding capabilities. It leads various benchmarks, indicating its enhanced performance.","contextLength":1000000,"maxTokens":65536,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/google/gemini-2.5-pro"},"features":["openai/chat-completion","openai/chat-completion.vision","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.max-completion-tokens","openai/chat-completion.stream","openai/chat-completion.number-of-messages","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal"],"endpoints":["/v1/chat/completions"]},{"id":"google/gemini-3-1-pro-preview","type":"chat-completion","info":{"name":"Gemini 3.1 Pro Preview","developer":"Google","description":"A frontier reasoning model optimized for software engineering and agentic workflows with 1M token context.","contextLength":1000000,"maxTokens":65536,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/google/gemini-3.1-pro-preview"},"features":["openai/chat-completion","openai/chat-completion.vision","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.max-completion-tokens","openai/chat-completion.stream","openai/chat-completion.number-of-messages","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal"],"endpoints":["/v1/chat/completions"]},{"id":"google/gemini-3-flash-preview","type":"chat-completion","info":{"name":"Gemini 3 Flash Preview","developer":"Google","description":"","contextLength":1000000,"maxTokens":65536,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/google/gemini-3-flash-preview"},"features":["openai/chat-completion","openai/chat-completion.vision","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.max-completion-tokens","openai/chat-completion.stream","openai/chat-completion.number-of-messages","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal"],"endpoints":["/v1/chat/completions"]},{"id":"google/gemini-2.5-flash","type":"chat-completion","info":{"name":"Gemini 2.5 Flash","developer":"Google","description":"Gemini 2.5 Flash is a powerful AI model developed by Google, designed to handle complex tasks with high accuracy and efficiency and has thinking capabilities.","contextLength":1000000,"maxTokens":65536,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/google/gemini-2.5-flash"},"features":["openai/chat-completion","openai/chat-completion.vision","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.max-completion-tokens","openai/chat-completion.stream","openai/chat-completion.number-of-messages","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal"],"endpoints":["/v1/chat/completions"]},{"id":"google/gemini-2.5-flash-lite-preview","type":"chat-completion","info":{"name":"Gemini 2.5 Flash Lite Preview","developer":"Google","description":"Gemini 2.5 Flash Lite Preview is a lightweight AI model developed by Google, optimized for quick responses and efficient processing, making it ideal for tasks requiring minimal latency and resource consumption","contextLength":1000000,"maxTokens":1048576,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/google/gemini-2.5-flash-lite-preview"},"features":["openai/chat-completion","openai/chat-completion.vision","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.max-completion-tokens","openai/chat-completion.stream","openai/chat-completion.number-of-messages","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.reasoning"],"endpoints":["/v1/chat/completions"]},{"id":"deepseek-chat","type":"chat-completion","info":{"name":"DeepSeek V3","developer":"DeepSeek AI","description":"DeepSeek-V3 is a strong Mixture-of-Experts (MoE) language model.","contextLength":128000,"maxTokens":124000,"url":"https://aimlapi.com/models/deepseek-v3","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/deepseek/deepseek-chat"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.message.developer","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","together/chat-completion.echo","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openai/chat-completion.max-completion-tokens","openai/chat-completion.logit-bias","openrouter/chat-completion.top-a","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"deepseek-reasoner","type":"chat-completion","info":{"name":"DeepSeek R1","developer":"DeepSeek AI","description":"DeepSeek-R1 is a first-generation reasoning model.","contextLength":128000,"maxTokens":127000,"url":"https://aimlapi.com/models/deepseek-r1-api","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/deepseek/deepseek-r1"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.message.developer","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","together/chat-completion.echo","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openai/chat-completion.logit-bias","openai/chat-completion.number-of-messages"],"endpoints":["/v1/chat/completions"]},{"id":"deepseek/deepseek-chat-v3.1","type":"chat-completion","info":{"name":"DeepSeek V3.1","developer":"DeepSeek AI","description":"DeepSeek-V3.1 is the latest advanced LLM with improved architecture and performance across various natural language tasks.","contextLength":128000,"maxTokens":8000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/deepseek/deepseek-chat-v3.1"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.message.developer","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","together/chat-completion.echo","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openai/chat-completion.max-completion-tokens","openai/chat-completion.logit-bias","openrouter/chat-completion.top-a","openai/chat-completion.response-format","openai/chat-completion.function"],"endpoints":["/v1/chat/completions"]},{"id":"deepseek/deepseek-reasoner-v3.1","type":"chat-completion","info":{"name":"DeepSeek Reasoner V3.1","developer":"DeepSeek AI","description":"DeepSeek Reasoner V3.1 excels in reasoning tasks with advanced chain-of-thought processing and the latest improvements in parameter activation.","contextLength":128000,"maxTokens":64000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/deepseek/deepseek-reasoner-v3.1"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.message.developer","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","together/chat-completion.echo","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openai/chat-completion.logit-bias","openai/chat-completion.number-of-messages","openai/chat-completion.response-format","openai/chat-completion.function"],"endpoints":["/v1/chat/completions"]},{"id":"deepseek/deepseek-non-reasoner-v3.1-terminus","type":"chat-completion","info":{"name":"DeepSeek V3.1 Terminus","developer":"DeepSeek AI","description":"DeepSeek V3.1 Terminus non-reasoner is the latest advanced LLM, featuring an enhanced architecture and improved performance across a wide range of natural language tasks.","contextLength":128000,"maxTokens":8000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/deepseek/deepseek-chat-v3.1"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.message.developer","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","together/chat-completion.echo","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openai/chat-completion.max-completion-tokens","openai/chat-completion.logit-bias","openrouter/chat-completion.top-a","openai/chat-completion.response-format","openai/chat-completion.function"],"endpoints":["/v1/chat/completions"]},{"id":"deepseek/deepseek-reasoner-v3.1-terminus","type":"chat-completion","info":{"name":"DeepSeek Reasoner V3.1 Terminus","developer":"DeepSeek AI","description":"DeepSeek Reasoner V3.1 Terminus excels at complex reasoning tasks, leveraging advanced chain-of-thought processing and optimized parameter activation for improved performance.","contextLength":128000,"maxTokens":64000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/deepseek/deepseek-reasoner-v3.1"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.message.developer","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","together/chat-completion.echo","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openai/chat-completion.logit-bias","openai/chat-completion.number-of-messages","openai/chat-completion.response-format","openai/chat-completion.function"],"endpoints":["/v1/chat/completions"]},{"id":"deepseek/deepseek-non-thinking-v3.2-exp","type":"chat-completion","info":{"name":"DeepSeek-V3.2-Exp Non-thinking","developer":"DeepSeek AI","description":"","contextLength":128000,"maxTokens":8000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/deepseek/deepseek-chat-v3.2-exp"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.message.developer","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","together/chat-completion.echo","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openai/chat-completion.max-completion-tokens","openai/chat-completion.logit-bias","openrouter/chat-completion.top-a","openai/chat-completion.response-format","openai/chat-completion.function"],"endpoints":["/v1/chat/completions"]},{"id":"deepseek/deepseek-thinking-v3.2-exp","type":"chat-completion","info":{"name":"DeepSeek-V3.2-Exp thinking","developer":"DeepSeek AI","description":"","contextLength":128000,"maxTokens":64000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/deepseek/deepseek-chat-v3.2-exp"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.message.developer","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","together/chat-completion.echo","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openai/chat-completion.logit-bias","openai/chat-completion.number-of-messages","openai/chat-completion.response-format","openai/chat-completion.function"],"endpoints":["/v1/chat/completions"]},{"id":"alibaba/qwen-max","type":"chat-completion","info":{"name":"Qwen max","developer":"Alibaba Cloud","description":"Qwen Max: powerful AI, excels in stability, competes globally.","contextLength":32000,"maxTokens":8192,"url":"https://aimlapi.com/models/qwen-max-api","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/alibaba-cloud/qwen-max"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.logprobs"],"endpoints":["/v1/chat/completions"]},{"id":"alibaba/qwen-plus","type":"chat-completion","info":{"name":"Qwen Plus","developer":"Alibaba Cloud","description":"Qwen-Plus: Alibaba's advanced multilingual model with enhanced reasoning and instruction-following capabilities.","contextLength":131000,"maxTokens":16384,"url":"https://aimlapi.com/models/qwen-plus-api","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/alibaba-cloud/qwen-plus"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.number-of-messages","openai/chat-completion.logprobs"],"endpoints":["/v1/chat/completions"]},{"id":"alibaba/qwen-turbo","type":"chat-completion","info":{"name":"Qwen Turbo","developer":"Alibaba Cloud","description":"Qwen Turbo: optimizes AI agent speed, integrates with RAG, large context window.","contextLength":1000000,"maxTokens":16384,"url":"https://aimlapi.com/models/qwen-turbo-api","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/alibaba-cloud/qwen-turbo"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.logprobs"],"endpoints":["/v1/chat/completions"]},{"id":"alibaba/qwen-max-2025-01-25","type":"chat-completion","info":{"name":"Qwen Max 2025-01-25","developer":"Alibaba Cloud","description":"Alibaba's Qwen Max: a powerful, multilingual MoE model for diverse AI tasks","contextLength":32000,"maxTokens":8192,"url":"https://aimlapi.com/models/qwen-max-2025-01-25-api","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/alibaba-cloud/qwen-max"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.logprobs"],"endpoints":["/v1/chat/completions"]},{"id":"alibaba/qwen3-coder-480b-a35b-instruct","type":"chat-completion","info":{"name":"Qwen 3 Coder","developer":"Alibaba Cloud","description":"Alibaba's Qwen 3 Coder is a powerful coding agent.","contextLength":262000,"maxTokens":65536,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/alibaba-cloud/qwen3-coder-480b-a35b-instruct"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"alibaba/qwen3-32b","type":"chat-completion","info":{"name":"Qwen 3 32B","developer":"Alibaba Cloud","description":"Alibaba's Qwen 3 32B is a multilingual model with instruction-following capabilities and enhanced reasoning.","contextLength":131000,"maxTokens":16384,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/alibaba-cloud/qwen3-32b"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.max-completion-tokens","together/chat-completion.repetition-penalty","alibaba/chat-completion.reasoning","openai/chat-completion.logprobs"],"endpoints":["/v1/chat/completions"]},{"id":"alibaba/qwen3-max-preview","type":"chat-completion","info":{"name":"Qwen 3 Max Preview","developer":"Alibaba Cloud","description":"Alibaba's Qwen 3 Max Preview is a multilingual model with instruction-following capabilities and enhanced reasoning.","contextLength":252000,"maxTokens":32768,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/alibaba-cloud"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.max-completion-tokens","together/chat-completion.repetition-penalty","openai/chat-completion.logprobs"],"endpoints":["/v1/chat/completions"]},{"id":"alibaba/qwen3-235b-a22b-thinking-2507","type":"chat-completion","info":{"name":"Qwen 3 Thinking 2507","developer":"Alibaba Cloud","description":"Alibaba's Qwen 3 thinking 2507 is a multilingual reasoning model with knowledge augmentation, and creative capabilities.","contextLength":32000,"maxTokens":16000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/alibaba-cloud/qwen3-235b-a22b-thinking-2507"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.max-completion-tokens","together/chat-completion.repetition-penalty","alibaba/chat-completion.reasoning"],"endpoints":["/v1/chat/completions"]},{"id":"alibaba/qwen3-next-80b-a3b-thinking","type":"chat-completion","info":{"name":"Qwen 3 Next 80B A3B Thinking","developer":"Alibaba Cloud","description":"Alibaba's Qwen 3 Next 80B A3B Thinking is a multilingual reasoning model with advanced chain-of-thought capabilities.","contextLength":126976,"maxTokens":81920,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/alibaba-cloud/qwen3-next-80b-a3b-thinking"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.max-completion-tokens","together/chat-completion.repetition-penalty","alibaba/chat-completion.reasoning"],"endpoints":["/v1/chat/completions"]},{"id":"alibaba/qwen3-next-80b-a3b-instruct","type":"chat-completion","info":{"name":"Qwen 3 Next 80B A3B Instruct","developer":"Alibaba Cloud","description":"Alibaba's Qwen 3 Next 80B A3B Instruct is a multilingual model with enhanced instruction-following capabilities.","contextLength":129024,"maxTokens":16384,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/alibaba-cloud/qwen3-next-80b-a3b-instruct"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.max-completion-tokens","together/chat-completion.repetition-penalty","openai/chat-completion.logprobs"],"endpoints":["/v1/chat/completions"]},{"id":"alibaba/qwen3-max-instruct","type":"chat-completion","info":{"name":"Qwen 3 Max Instruct","developer":"Alibaba Cloud","description":"Alibaba's Qwen 3 Max Instruct is the most powerful model in the Qwen 3 series.","contextLength":262144,"maxTokens":65536,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/alibaba-cloud/qwen3-max-instruct"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.max-completion-tokens","together/chat-completion.repetition-penalty","openai/chat-completion.logprobs"],"endpoints":["/v1/chat/completions"]},{"id":"alibaba/qwen3-omni-30b-a3b-captioner","type":"chat-completion","info":{"name":"Qwen 3 Omni Captioner","developer":"Alibaba Cloud","description":"Qwen3-Omni-Captioner automatically generates accurate and comprehensive descriptions for complex audio, including speech, ambient sounds, music, and sound effects. The model can identify speaker emotions, musical elements, and is suitable for audio content analysis, security audits, and audio editing. Supports audio input up to 40 minutes (1 second = 25 tokens).","contextLength":65536,"maxTokens":32768,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/alibaba-cloud/qwen3-omni-30b-a3b-captioner"},"features":["openai/chat-completion","openai/chat-completion.stream","alibaba/chat-completion.audio-only"],"endpoints":["/v1/chat/completions"]},{"id":"alibaba/qwen3-vl-plus","type":"chat-completion","info":{"name":"Qwen 3 VL Plus","developer":"Alibaba Cloud","description":"Alibaba's Qwen 3 VL Plus is a powerful vision-language model with hybrid thinking capabilities. Supports image understanding with up to 16,384 tokens per image and high-resolution mode for detailed analysis.","contextLength":262144,"maxTokens":32768,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/alibaba-cloud/qwen3-vl-plus"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.vision","openai/chat-completion.max-completion-tokens","together/chat-completion.repetition-penalty","alibaba/chat-completion.reasoning"],"endpoints":["/v1/chat/completions"]},{"id":"alibaba/qwen3-vl-flash","type":"chat-completion","info":{"name":"Qwen 3 VL Flash","developer":"Alibaba Cloud","description":"Alibaba's Qwen 3 VL Flash is a fast and cost-effective vision-language model with hybrid thinking capabilities.","contextLength":262144,"maxTokens":32768,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/alibaba-cloud/qwen3-vl-flash"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.vision","openai/chat-completion.max-completion-tokens","together/chat-completion.repetition-penalty","alibaba/chat-completion.reasoning"],"endpoints":["/v1/chat/completions"]},{"id":"alibaba/qwen3-vl-32b-thinking","type":"chat-completion","info":{"name":"Qwen 3 VL 32B Thinking","developer":"Alibaba Cloud","description":"Alibaba's Qwen 3 VL 32B Thinking is a powerful vision-language model with hybrid thinking capabilities.","contextLength":126000,"maxTokens":32768,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/alibaba-cloud/qwen3-vl-32b-thinking"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.max-completion-tokens","together/chat-completion.repetition-penalty","openai/chat-completion.vision"],"endpoints":["/v1/chat/completions"]},{"id":"alibaba/qwen3-vl-32b-instruct","type":"chat-completion","info":{"name":"Qwen 3 VL 32B Instruct","developer":"Alibaba Cloud","description":"Alibaba's Qwen 3 VL 32B Instruct is a powerful vision-language model with hybrid instruction-following capabilities.","contextLength":126000,"maxTokens":32768,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/alibaba-cloud/qwen3-vl-32b-instruct"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.max-completion-tokens","together/chat-completion.repetition-penalty","openai/chat-completion.vision"],"endpoints":["/v1/chat/completions"]},{"id":"alibaba/qwen3.5-plus-20260218","type":"chat-completion","info":{"name":"Qwen 3.5 Plus","developer":"Alibaba Cloud","description":"Qwen3.5-Plus is a commercial large language model by Alibaba Cloud designed for long-context text generation and enterprise-grade conversational AI. Supports up to 1M tokens per request with production-ready API stability.","contextLength":1000000,"maxTokens":65536,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/alibaba-cloud/qwen3.5-plus"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.max-completion-tokens","together/chat-completion.repetition-penalty","alibaba/chat-completion.reasoning","openai/chat-completion.logprobs"],"endpoints":["/v1/chat/completions"]},{"id":"anthropic/claude-opus-4.6","type":"chat-completion","info":{"name":"Claude 4.6 Opus","developer":"Anthropic","description":"Claude 4.6 Opus is Anthropic's most capable model, designed for complex reasoning, analysis, and long-form content generation.","contextLength":200000,"maxTokens":128000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/anthropic/claude-4.6-opus"},"features":["anthropic/message-completion","anthropic/message-completion.vision","anthropic/message-completion.function","anthropic/message-completion.reasoning"],"endpoints":["/v1/chat/completions","/v1/messages"]},{"id":"mistralai/mistral-nemo","type":"chat-completion","info":{"name":"Mistral nemo","developer":"Mistral AI","description":"Mistral-Nemo is a powerful multilingual language model with advanced capabilities.","contextLength":128000,"maxTokens":131072,"url":"https://aimlapi.com/models/mistral-nemo-api","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/mistral-ai/mistral-nemo"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.developer","openai/chat-completion.stop","openai/chat-completion.logit-bias","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"anthracite-org/magnum-v4-72b","type":"chat-completion","info":{"name":"Magnum v4 72B","developer":"Anthracite","description":"Magnum V4 is a powerful language model optimized for high-quality text generation tasks.","contextLength":32000,"maxTokens":16384,"url":"https://aimlapi.com/models/magnum-v4-72b-api","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/anthracite/magnum-v4"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.stop","openai/chat-completion.logit-bias","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"gryphe/mythomax-l2-13b","type":"chat-completion","info":{"name":"MythoMax 13B","developer":"Gryphe","description":"One of the highest performing and most popular fine-tunes of Llama 2 13B, with rich descriptions and roleplay.","contextLength":4096,"maxTokens":4096,"url":"","docs_url":""},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.stop"],"endpoints":["/v1/chat/completions"]},{"id":"nvidia/llama-3.1-nemotron-70b-instruct","type":"chat-completion","info":{"name":"Llama 3.1 nemotron 70B instruct","developer":"Nvidia","description":" Llama 3.1 Nemotron is an advanced instruction-following language model optimized for high-performance applications.","contextLength":128000,"maxTokens":131072,"url":"https://aimlapi.com/models/llama-3-1-nemotron-70b-instruct-api","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/nvidia/llama-3.1-nemotron-70b"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.developer","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.logit-bias","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"google/gemma-3-4b-it","type":"chat-completion","info":{"name":"Gemma 3 4B","developer":"Google","description":"Gemma 3 4B is a lightweight open language model from Google, suitable for on-device deployment and efficient inference.","contextLength":131000,"maxTokens":32768,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/google/gemma-3"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.vision","openai/chat-completion.stop"],"endpoints":["/v1/chat/completions"]},{"id":"google/gemma-3-12b-it","type":"chat-completion","info":{"name":"Gemma 3 12B","developer":"Google","description":"Gemma 3 12B is a mid-size open language model from Google offering a balance of capability and efficiency.","contextLength":131000,"maxTokens":96000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/google/gemma-3"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.vision","openai/chat-completion.message.developer","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"google/gemma-3-27b-it","type":"chat-completion","info":{"name":"Gemma 3 27B","developer":"Google","description":"Gemma 3 27B is Google's largest Gemma open model, delivering strong reasoning and text generation capabilities.","contextLength":128000,"maxTokens":131072,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/google/gemma-3"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.vision","openai/chat-completion.message.developer","openai/chat-completion.stop","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"deepseek/deepseek-v3.2-speciale","type":"chat-completion","info":{"name":"DeepSeek-V3.2-Speciale","developer":"DeepSeek AI","description":"DeepSeek-V3.2-Speciale is a high-compute variant of DeepSeek-V3.2 optimized for maximum reasoning and agentic performance.","contextLength":128000,"maxTokens":128000,"url":"","docs_url":""},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.logit-bias","openai/chat-completion.number-of-messages"],"endpoints":["/v1/chat/completions"]},{"id":"google/gemma-3n-e4b-it","type":"chat-completion","info":{"name":"Gemma 3n 4B","developer":"Google","description":"Gemma 3n 4B is an ultra-efficient next-generation Gemma model optimized for on-device multimodal applications.","contextLength":8192,"maxTokens":2048,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/google/gemma-3n-4b"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.stop","openai/chat-completion.logit-bias"],"endpoints":["/v1/chat/completions"]},{"id":"cohere/command-a","type":"chat-completion","info":{"name":"Command A","developer":"Cohere","description":"Command A is a powerful LLM with advanced capabilities for enterprise applications.","contextLength":256000,"maxTokens":256000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/cohere/command-a"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.stop","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"openai/gpt-oss-120b","type":"chat-completion","info":{"name":"GPT OSS 120B","developer":"Open AI","description":"GPT OSS 120B is the most powerful Open AI open weight model.","contextLength":131000,"maxTokens":131072,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-oss-120b"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.response-format","openai/chat-completion.reasoning","together/chat-completion.echo"],"endpoints":["/v1/chat/completions"]},{"id":"openai/gpt-oss-20b","type":"chat-completion","info":{"name":"GPT OSS 20B","developer":"Open AI","description":"GPT OSS 20B is a powerful Open AI open weight model.","contextLength":131000,"maxTokens":131072,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/openai/gpt-oss-20b"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.response-format","openai/chat-completion.reasoning","together/chat-completion.echo"],"endpoints":["/v1/chat/completions"]},{"id":"nousresearch/hermes-4-405b","type":"chat-completion","info":{"name":"Nous: Hermes 4 405B","developer":"NousResearch","description":"Hermes 4 is a large-scale reasoning model built on Meta-Llama-3.1-405B with hybrid reasoning mode. It can choose to deliberate internally with <think>...</think> traces or respond directly, offering flexibility between speed and depth. Supports structured outputs, JSON mode, function calling, and tool use.","contextLength":131072,"maxTokens":16000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.developer","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"nvidia/nemotron-nano-9b-v2","type":"chat-completion","info":{"name":"Nemotron Nano 9B V2","developer":"Nvidia","description":"","contextLength":131072,"maxTokens":131072,"url":"","docs_url":""},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.message.refusal","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.seed","openai/chat-completion.presence-penalty","openai/chat-completion.response-format","openrouter/chat-completion.reasoning","together/chat-completion.echo","together/chat-completion.min-p","together/chat-completion.top-k","openrouter/chat-completion.top-a","together/chat-completion.repetition-penalty"],"endpoints":["/v1/chat/completions"]},{"id":"nvidia/nemotron-nano-12b-v2-vl","type":"chat-completion","info":{"name":"Nemotron Nano 12B V2 VL","developer":"Nvidia","description":"","contextLength":131072,"maxTokens":131072,"url":"","docs_url":""},"features":["openai/chat-completion.message.system","openai/chat-completion.message.developer","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.seed","openai/chat-completion.presence-penalty","openrouter/chat-completion.reasoning","together/chat-completion.echo","together/chat-completion.min-p","together/chat-completion.top-k","openrouter/chat-completion.top-a","together/chat-completion.repetition-penalty"],"endpoints":["/v1/chat/completions"]},{"id":"xiaomi/mimo-v2-flash","type":"chat-completion","info":{"name":"MiMo V2 Flash","developer":"Xiaomi","description":"Xiaomi MiMo V2 Flash model for general chat completions.","contextLength":128000,"maxTokens":0,"url":"","docs_url":""},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.min-p","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stop","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"MiniMax-Text-01","type":"chat-completion","info":{"contextLength":1000000,"maxTokens":40000,"description":"MiniMax-Text-01 excels in long-context processing with advanced features like hybrid attention mechanisms and open-source accessibility.","name":"Text 01","developer":"Minimax AI","url":"https://aimlapi.com/models/minimax-text-01-api","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/minimax/text-01"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature?min=0&max=1","openai/chat-completion.top-p","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.vision","minimax/chat-completion.mask-sensitive-info"],"endpoints":["/v1/chat/completions"]},{"id":"minimax/m1","type":"chat-completion","info":{"contextLength":1000000,"maxTokens":40960,"description":"MiniMax-M1 is a powerful reasoning model with a 1M token context window, delivering strong performance on complex tasks.","name":"Minimax M1","developer":"Minimax AI","url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/minimax/m1"},"features":["minimax/chat-completion.mask-sensitive-info","openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature?min=0&max=1","openai/chat-completion.top-p","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"minimax/m2","type":"chat-completion","info":{"contextLength":1000000,"maxTokens":40960,"description":"MiniMax-M2 is an advanced large language model with 1M token context for extended reasoning and generation.","name":"Minimax M2","developer":"Minimax AI","url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/minimax/m2"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature?min=0&max=1","openai/chat-completion.top-p","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"minimax/m2-1","type":"chat-completion","info":{"contextLength":204000,"maxTokens":204000,"description":"MiniMax-M2.1 is an updated Minimax model with a 200K context window optimized for conversational and reasoning tasks.","name":"Minimax M2.1","developer":"Minimax AI","url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/minimax/m2-1"},"features":["openai/chat-completion","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.message.refusal","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.logit-bias","openai/chat-completion.logprobs","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.seed","openai/chat-completion.presence-penalty","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"minimax/m2-5-20260218","type":"chat-completion","info":{"contextLength":204800,"maxTokens":204800,"description":"MiniMax-M2.5 is a general-purpose large language model by MiniMax designed for text generation and conversational AI use cases.","name":"MiniMax M2.5","developer":"Minimax AI","url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/minimax/m2-5"},"features":["openai/chat-completion","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.message.refusal","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.logit-bias","openai/chat-completion.logprobs","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.seed","openai/chat-completion.presence-penalty","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"minimax/m2-5-highspeed-20260218","type":"chat-completion","info":{"contextLength":204800,"maxTokens":204800,"description":"MiniMax-M2.5 Highspeed is a low-latency, high-throughput version of MiniMax-M2.5 optimized for real-time applications.","name":"MiniMax M2.5 Highspeed","developer":"Minimax AI","url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/minimax/m2-5-highspeed"},"features":["openai/chat-completion","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.message.refusal","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.file","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.logit-bias","openai/chat-completion.logprobs","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.seed","openai/chat-completion.presence-penalty","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"moonshot/kimi-k2-5","type":"chat-completion","info":{"name":"Kimi k2.5","developer":"Moonshot","description":"Kimi k2.5 is a powerful agentic model by Moonshot AI with strong coding and tool-use capabilities.","contextLength":131000,"maxTokens":131000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/moonshot/kimi-k2-5"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.vision","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.max-completion-tokens","openai/chat-completion.stream","openai/chat-completion.number-of-messages","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.response-format","moonshot/chat-completion.search"],"endpoints":["/v1/chat/completions"]},{"id":"moonshot/kimi-k2-preview","type":"chat-completion","info":{"name":"Kimi k2 preview","developer":"Moonshot","description":"Kimi k2 Preview is an early access release of Moonshot AI's frontier agentic language model.","contextLength":131000,"maxTokens":130000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/moonshot/kimi-k2-preview"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.max-completion-tokens","openai/chat-completion.stream","openai/chat-completion.number-of-messages","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.response-format","moonshot/chat-completion.search"],"endpoints":["/v1/chat/completions"]},{"id":"moonshot/kimi-k2-0905-preview","type":"chat-completion","info":{"name":"Kimi k2 0905 preview","developer":"Moonshot","description":"Kimi k2 0905 Preview is an updated preview of Kimi k2 with extended context and improved performance.","contextLength":262000,"maxTokens":260000,"url":"","docs_url":""},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.max-completion-tokens","openai/chat-completion.stream","openai/chat-completion.number-of-messages","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.response-format","moonshot/chat-completion.search"],"endpoints":["/v1/chat/completions"]},{"id":"moonshot/kimi-k2-turbo-preview","type":"chat-completion","info":{"name":"Kimi k2 turbo preview","developer":"Moonshot","description":"Kimi k2 Turbo Preview is a faster, more efficient variant of Kimi k2 for high-throughput agentic applications.","contextLength":262000,"maxTokens":260000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/moonshot/kimi-k2-turbo-preview"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.max-completion-tokens","openai/chat-completion.stream","openai/chat-completion.number-of-messages","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.response-format","moonshot/chat-completion.search"],"endpoints":["/v1/chat/completions"]},{"id":"x-ai/grok-4-07-09","type":"chat-completion","info":{"name":"Grok 4","developer":"X AI","description":"Grok 4 is the latest model with 256k context, text, structured outputs, and parallel tools, with always-on, non-configurable reasoning.","contextLength":256000,"maxTokens":255000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/xai/grok-4"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.prediction","openai/chat-completion.message.system","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.developer","openai/chat-completion.logprobs","openrouter/chat-completion.reasoning"],"endpoints":["/v1/chat/completions"]},{"id":"x-ai/grok-3-beta","type":"chat-completion","info":{"name":"Grok 3 Beta","developer":"X AI","description":"Grok Beta is a state-of-the-art AI model by xAI with real-time world knowledge and multi-modal capabilities.","contextLength":131000,"maxTokens":130000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/xai/grok-3-beta"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.developer","openai/chat-completion.stop","openai/chat-completion.logprobs","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"x-ai/grok-3-mini-beta","type":"chat-completion","info":{"name":"Grok 3 Beta Mini","developer":"X AI","description":"Grok Beta Mini is a state-of-the-art AI model by xAI with real-time world knowledge and multi-modal capabilities.","contextLength":131000,"maxTokens":130000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/xai/grok-3-mini-beta"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.prediction","openai/chat-completion.message.system","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.developer","openai/chat-completion.logprobs","openrouter/chat-completion.reasoning","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"x-ai/grok-code-fast-1","type":"chat-completion","info":{"name":"Grok Code Fast 1","developer":"X AI","description":"A speedy and economical reasoning model that excels at agentic coding with function calling and structured outputs.","contextLength":256000,"maxTokens":255000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/xai/grok-code-fast-1"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.prediction","openai/chat-completion.message.system","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.developer","openai/chat-completion.logprobs","openrouter/chat-completion.reasoning","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"x-ai/grok-4-fast-reasoning","type":"chat-completion","info":{"name":"Grok 4 Fast Reasoning","developer":"X AI","description":"Grok 4 Fast with always-on reasoning for complex problem-solving tasks.","contextLength":2000000,"maxTokens":1999000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/xai/grok-4-fast-reasoning"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.prediction","openai/chat-completion.message.system","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.developer","openai/chat-completion.logprobs","openrouter/chat-completion.reasoning"],"endpoints":["/v1/chat/completions"]},{"id":"x-ai/grok-4-fast-non-reasoning","type":"chat-completion","info":{"name":"Grok 4 Fast Non-Reasoning","developer":"X AI","description":"Grok 4 Fast without reasoning for faster responses and lower latency.","contextLength":2000000,"maxTokens":1999000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/xai/grok-4-fast-non-reasoning"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.prediction","openai/chat-completion.message.system","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.developer","openai/chat-completion.logprobs"],"endpoints":["/v1/chat/completions"]},{"id":"x-ai/grok-4-1-fast-reasoning","type":"chat-completion","info":{"name":"Grok 4.1 Fast Reasoning","developer":"X AI","description":"Grok 4.1 Fast is a frontier multimodal model optimized for high-performance agentic tool calling with advanced reasoning capabilities.","contextLength":2000000,"maxTokens":1999000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/xai/grok-4-1-fast-reasoning"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.prediction","openai/chat-completion.message.system","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.developer","openai/chat-completion.logprobs","openrouter/chat-completion.reasoning"],"endpoints":["/v1/chat/completions"]},{"id":"x-ai/grok-4-1-fast-non-reasoning","type":"chat-completion","info":{"name":"Grok 4.1 Fast Non-Reasoning","developer":"X AI","description":"Grok 4.1 Fast without reasoning for faster responses and lower latency in high-performance agentic applications.","contextLength":2000000,"maxTokens":1999000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/xai/grok-4-1-fast-non-reasoning"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.prediction","openai/chat-completion.message.system","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.developer","openai/chat-completion.logprobs"],"endpoints":["/v1/chat/completions"]},{"id":"perplexity/sonar","type":"chat-completion","info":{"name":"Sonar","developer":"Perplexity","description":"Perplexity Sonar is a fast search-augmented language model grounded in real-time web data for up-to-date answers.","contextLength":128000,"maxTokens":100000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/perplexity/sonar"},"features":["openai/chat-completion","openai/chat-completion.vision","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.search","together/chat-completion.top-k","perplexity/chat-completion.search"],"endpoints":["/v1/chat/completions"]},{"id":"perplexity/sonar-pro","type":"chat-completion","info":{"name":"Sonar Pro","developer":"Perplexity","description":"Perplexity Sonar Pro is an advanced search-augmented language model with deeper reasoning and broader web coverage.","contextLength":200000,"maxTokens":100000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/perplexity/sonar-pro"},"features":["openai/chat-completion","openai/chat-completion.vision","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.logit-bias","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.seed","openai/chat-completion.response-format","openai/chat-completion.search","together/chat-completion.top-k","perplexity/chat-completion.search"],"endpoints":["/v1/chat/completions"]},{"id":"zhipu/glm-4.5","type":"chat-completion","info":{"name":"GLM 4.5","developer":"Zhipu AI","description":"Advanced version of GLM-4.5 with enhanced capabilities and web search integration","contextLength":128000,"maxTokens":98000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/zhipu/glm-4.5"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.max-completion-tokens","openai/chat-completion.stream","openai/chat-completion.number-of-messages","openai/chat-completion.top-p","zhipu/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.response-format","zhipu/chat-completion.thinking","zhipu/chat-completion.web-search"],"endpoints":["/v1/chat/completions"]},{"id":"zhipu/glm-4.5-air","type":"chat-completion","info":{"name":"GLM 4.5 Air","developer":"Zhipu AI","description":"Lightweight version of GLM-4.5 for cost-effective applications","contextLength":128000,"maxTokens":98000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/zhipu/glm-4.5-air"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.max-completion-tokens","openai/chat-completion.stream","openai/chat-completion.number-of-messages","openai/chat-completion.top-p","zhipu/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.response-format","zhipu/chat-completion.thinking","zhipu/chat-completion.web-search"],"endpoints":["/v1/chat/completions"]},{"id":"zhipu/glm-4.6","type":"chat-completion","info":{"name":"GLM 4.6","developer":"Zhipu AI","description":"Advanced version of GLM-4.6 with enhanced capabilities and web search integration","contextLength":200000,"maxTokens":128000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/zhipu/glm-4.6"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.max-completion-tokens","openai/chat-completion.stream","openai/chat-completion.number-of-messages","openai/chat-completion.top-p","zhipu/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.response-format","zhipu/chat-completion.thinking","zhipu/chat-completion.web-search"],"endpoints":["/v1/chat/completions"]},{"id":"zhipu/glm-4.7","type":"chat-completion","info":{"name":"GLM 4.7","developer":"Zhipu AI","description":"GLM-4.7 is Zhipu AI flagship text LLM optimized for agentic coding and stable multi-step reasoning, supporting long-context workflows.","contextLength":200000,"maxTokens":128000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/zhipu/glm-4.7"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.max-completion-tokens","openai/chat-completion.stream","openai/chat-completion.number-of-messages","openai/chat-completion.top-p","zhipu/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.response-format","zhipu/chat-completion.thinking","zhipu/chat-completion.web-search"],"endpoints":["/v1/chat/completions"]},{"id":"zhipu/glm-5","type":"chat-completion","info":{"name":"GLM 5","developer":"Zhipu AI","description":"GLM-5 is Zhipu AI next-generation 745B MoE LLM for high-quality text generation and reasoning, optimized for chat, long-form content, and instruction-following tasks.","contextLength":200000,"maxTokens":128000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/zhipu/glm-5"},"features":["openai/chat-completion","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.max-completion-tokens","openai/chat-completion.stream","openai/chat-completion.number-of-messages","openai/chat-completion.top-p","zhipu/chat-completion.stop","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.response-format","zhipu/chat-completion.thinking","zhipu/chat-completion.web-search"],"endpoints":["/v1/chat/completions"]},{"id":"meta-llama/llama-3.3-70b-versatile","type":"chat-completion","info":{"name":"Llama 3.3 70B Versatile","developer":"Meta","description":"Llama-3.3-70B-Versatile is Meta's advanced multilingual large language model, optimized for a wide range of natural language processing tasks. With 70 billion parameters, it offers high performance across various benchmarks while maintaining efficiency suitable for diverse applications.","contextLength":131072,"maxTokens":32768,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/meta/llama-3.3-70b-versatile"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.message.system","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.frequency-penalty","openai/chat-completion.presence-penalty","openai/chat-completion.stop","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"bytedance/seed-1-8","type":"chat-completion","info":{"name":"Seed 1.8","developer":"ByteDance","description":"ByteDance Seed 1.8 is a state-of-the-art multimodal model with exceptional vision and reasoning capabilities.","contextLength":256000,"maxTokens":224000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/bytedance/seed-1.8"},"features":["openai/chat-completion","openai/chat-completion.vision","openai/chat-completion.function","openai/chat-completion.message.refusal","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.system","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.seed","openai/chat-completion.presence-penalty","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"baidu/ernie-4-5-0-3b","type":"chat-completion","info":{"name":"Ernie 4.5 0.3B","developer":"Baidu","description":"Lightweight Ernie 4.5 0.3B model that is marked as free on the provider portal.","contextLength":120000,"maxTokens":120000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/baidu/ernie-4.5-0.3b"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.stop","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"baidu/ernie-4-5-8k-preview","type":"chat-completion","info":{"name":"Ernie 4.5 8K Preview","developer":"Baidu","description":"Preview release of Ernie 4.5 configured for an 8K context window.","contextLength":8000,"maxTokens":2000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/baidu/ernie-4.5-8k-preview"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.stop","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"baidu/ernie-4-5-turbo-128k","type":"chat-completion","info":{"name":"Ernie 4.5 128K Turbo","developer":"Baidu","description":"Extended-context Ernie 4.5 Turbo variant optimized for 128K windows.","contextLength":128000,"maxTokens":128000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/baidu/ernie-4.5-turbo-128k"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.stop","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"baidu/ernie-4-5-21b-a3b-thinking","type":"chat-completion","info":{"name":"Ernie 4.5 21B A3B Thinking","developer":"Baidu","description":"Reasoning-optimized Thinking variant of Ernie 4.5 21B A3B.","contextLength":131000,"maxTokens":131000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/baidu/ernie-4.5-21b-a3b-thinking"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.stop","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"baidu/ernie-4-5-21b-a3b","type":"chat-completion","info":{"name":"Ernie 4.5 21B A3B","developer":"Baidu","description":"Primary Ernie 4.5 21B A3B chat model for general purpose tasks.","contextLength":120000,"maxTokens":120000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/baidu/ernie-4.5-21b-a3b"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.stop","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"baidu/ernie-4-5-vl-28b-a3b","type":"chat-completion","info":{"name":"Ernie 4.5 VL 28B A3B","developer":"Baidu","description":"Vision-language Ernie 4.5 VL 28B A3B for multimodal understanding.","contextLength":30000,"maxTokens":30000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/baidu/ernie-4.5-vl-28b-a3b"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.vision","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.stop","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"baidu/ernie-4-5-vl-424b-a47b","type":"chat-completion","info":{"name":"Ernie 4.5 VL 424B A47B","developer":"Baidu","description":"High-capacity vision-language Ernie 4.5 VL 424B A47B for demanding multimodal workflows.","contextLength":123000,"maxTokens":123000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/baidu/ernie-4.5-vl-424b-a47b"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.vision","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.stop","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"baidu/ernie-4-5-300b-a47b","type":"chat-completion","info":{"name":"Ernie 4.5 300B A47B","developer":"Baidu","description":"Top-tier Ernie 4.5 300B A47B for complex reasoning and long-form output.","contextLength":123000,"maxTokens":123000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/baidu/ernie-4.5-300b-a47b"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.stop","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"baidu/ernie-4-5-300b-a47b-paddle","type":"chat-completion","info":{"name":"Ernie 4.5 300B A47B Paddle","developer":"Baidu","description":"Ernie 4.5 300B model variant hosted via Paddle for enterprise deployments.","contextLength":123000,"maxTokens":123000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/baidu/ernie-4.5-300b-a47b-paddle"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.stop","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"baidu/ernie-4-5-turbo-vl-32k","type":"chat-completion","info":{"name":"Ernie 4.5 Turbo VL 32K","developer":"Baidu","description":"Vision-capable Ernie 4.5 Turbo with 32K context for multimodal interactions.","contextLength":32000,"maxTokens":32000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/baidu/ernie-4.5-turbo-vl-32k"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.vision","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.stop","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"baidu/ernie-5-0-thinking-latest","type":"chat-completion","info":{"name":"Ernie 5.0 Thinking Latest","developer":"Baidu","description":"Latest Ernie 5.0 Thinking release with broad capability support.","contextLength":128000,"maxTokens":128000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/baidu/ernie-5.0-thinking-latest"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.stop","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"baidu/ernie-5-0-thinking-preview","type":"chat-completion","info":{"name":"Ernie 5.0 Thinking Preview","developer":"Baidu","description":"Reasoning preview of Ernie 5.0 Thinking aimed at early experimentation.","contextLength":128000,"maxTokens":128000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/baidu/ernie-5.0-thinking-preview"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.stop","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"baidu/ernie-x1-1-preview","type":"chat-completion","info":{"name":"Ernie X1 1 Preview","developer":"Baidu","description":"Preview entry for the Ernie X1-1 family showcasing early capabilities.","contextLength":64000,"maxTokens":64000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/baidu/ernie-x1.1-preview"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.stop","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"baidu/ernie-x1-turbo-32k","type":"chat-completion","info":{"name":"Ernie X1 Turbo 32K","developer":"Baidu","description":"Ernie X1 Turbo with 32K context for high-throughput reasoning.","contextLength":32000,"maxTokens":32000,"url":"","docs_url":"https://docs.aimlapi.com/api-references/text-models-llm/baidu/ernie-x1-turbo-32k"},"features":["openai/chat-completion","openai/chat-completion.message.file","openai/chat-completion.message.assistant","openai/chat-completion.stream","openai/chat-completion.max-completion-tokens","openai/chat-completion.temperature","openai/chat-completion.top-p","openai/chat-completion.seed","together/chat-completion.top-k","together/chat-completion.repetition-penalty","openrouter/chat-completion.top-a","openai/chat-completion.frequency-penalty","openai/chat-completion.prediction","openai/chat-completion.presence-penalty","openai/chat-completion.message.system","openai/chat-completion.function","openai/chat-completion.parallel-tool-calls","openai/chat-completion.message.refusal","openai/chat-completion.stop","openai/chat-completion.response-format"],"endpoints":["/v1/chat/completions"]},{"id":"google/gc-document-ai","type":"document","info":{"name":"GC document AI","developer":"Google","description":"High-accuracy optical character recognition and document understanding powered by Google Document AI.","url":"","docs_url":"https://docs.aimlapi.com/api-references/vision-models/ocr-optical-character-recognition/google/google-ocr"},"features":[],"endpoints":["/v1/ocr"]},{"id":"mistral/mistral-ocr-latest","type":"document","info":{"name":"Mistral OCR","developer":"Mistral AI","description":"Next-generation OCR model from Mistral AI designed for high-fidelity text extraction and document layout analysis.","url":"","docs_url":"https://docs.aimlapi.com/api-references/vision-models/ocr-optical-character-recognition/mistral-ai/mistral-ocr-latest"},"features":[],"endpoints":["/v1/ocr"]},{"id":"zhipu/glm-ocr","type":"document","info":{"name":"GLM-OCR","developer":"Zhipu AI","description":"OCR and document understanding model for extracting text and structured data from images and documents.","url":"","docs_url":""},"features":[],"endpoints":["/v1/ocr"]},{"id":"dall-e-3","type":"image","info":{"developer":"Open AI","description":"Turn text into art with DALLÂ·E 3, the AI that brings creative visions to life.","name":"DALL-E 3","url":"https://aimlapi.com/models/openai-dall-e-3","docs_url":"https://docs.aimlapi.com/api-references/image-models/openai/dall-e-3"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"dall-e-2","type":"image","info":{"developer":"Open AI","description":"DALLÂ·E 2 generates realistic images from text, enhancing creative applications","name":"DALL-E 2","url":"https://aimlapi.com/models/openai-dall-e-2-api","docs_url":"https://docs.aimlapi.com/api-references/image-models/openai/dall-e-2"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"openai/gpt-image-1","type":"image","info":{"developer":"Open AI","description":"","name":"GPT Image 1","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/openai/gpt-image-1"},"features":[],"endpoints":["/v1/images/generations","/v1/images/edits"]},{"id":"openai/gpt-image-1-mini","type":"image","info":{"developer":"Open AI","description":"","name":"GPT Image 1 Mini","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/openai/"},"features":[],"endpoints":["/v1/images/generations","/v1/images/edits"]},{"id":"openai/gpt-image-1-5","type":"image","info":{"developer":"Open AI","description":"","name":"GPT-Image-1.5","url":"","docs_url":""},"features":[],"endpoints":["/v1/images/generations","/v1/images/edits"]},{"id":"imagen-3.0-generate-002","type":"image","info":{"name":"Imagen 3.0","developer":"Google","description":"Imagen 3: Google's high-quality text-to-image model with enhanced realism and understanding.","url":"https://aimlapi.com/models/imagen-3-api","docs_url":"https://docs.aimlapi.com/api-references/image-models/google/imagen-3.0"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"google/imagen-4.0-generate-001","type":"image","info":{"name":"Imagen 4.0 Generate","developer":"Google","description":"Imagen 4.0 Generate: Google's high-quality text-to-image model with enhanced realism and understanding.","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/google/imagen-4.0"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"google/imagen-4.0-fast-generate-001","type":"image","info":{"name":"Imagen 4.0 Fast Generate","developer":"Google","description":"Imagen 4.0 Fast Generate: Google's fast text-to-image model with enhanced realism and understanding.","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/google/imagen-4.0"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"google/imagen-4.0-ultra-generate-001","type":"image","info":{"name":"Imagen 4.0 Ultra Generate","developer":"Google","description":"Imagen 4.0 Ultra Generate: Google's high-quality text-to-image model with enhanced realism and understanding.","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/google/imagen-4.0"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"google/imagen4/preview","type":"image","info":{"name":"Imagen 4.0 Generate Preview","developer":"Google","description":"Imagen 4.0 Generate: Google's high-quality text-to-image model with enhanced realism and understanding.","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/google/imagen-4.0"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"google/gemini-3-pro-image-preview","type":"image","info":{"name":"Gemini 3 Pro Image","developer":"Google","description":"Gemini 3 Pro Image is a text-to-image model that generates images from text descriptions.","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/google/gemini-3-pro-image-preview"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"google/nano-banana-pro","type":"image","info":{"name":"Nano Banana Pro","developer":"Google","description":"Nano Banana Pro is a text-to-image model that generates images from text descriptions.","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/google/gemini-3-pro-image-preview"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"google/gemini-3-pro-image-preview-edit","type":"image","info":{"name":"Gemini 3 Pro Image Edit","developer":"Google","description":"Gemini 3 Pro Image Edit is an image-to-image model that edits images based on text prompts.","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/google/gemini-3-pro-image-preview-edit"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"google/nano-banana-pro-edit","type":"image","info":{"name":"Nano Banana Pro Edit","developer":"Google","description":"Nano Banana Pro Edit is an image-to-image model that edits images based on text prompts.","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/google/gemini-3-pro-image-preview-edit"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"google/gemini-2.5-flash-image","type":"image","info":{"name":"Gemini 2.5 Flash Image","developer":"Google","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/google/gemini-2.5-flash-image"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"google/gemini-2.5-flash-image-edit","type":"image","info":{"name":"Gemini 2.5 Flash Image Edit","developer":"Google","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/google/gemini-2.5-flash-image-edit"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"flux/schnell","type":"image","info":{"name":"Flux Schnell","developer":"Flux","description":"FLUX.1 [schnell] is a cutting-edge text-to-image model for developers.","url":"https://aimlapi.com/models/flux-1-schnell-api","docs_url":"https://docs.aimlapi.com/api-references/image-models/flux/flux-schnell"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"flux-pro","type":"image","info":{"name":"Flux pro","developer":"Flux","description":"FLUX.1 [pro]: The ultimate AI image generation model for professionals.","url":"https://aimlapi.com/models/flux-1-pro-api","docs_url":"https://docs.aimlapi.com/api-references/image-models/flux/flux-pro"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"flux-pro/v1.1","type":"image","info":{"name":"Flux pro 1.1","developer":"Flux","description":"FLUX 1.1 [Pro] delivers rapid image generation with advanced features tailored for developers.","url":"https://aimlapi.com/models/flux-1-1-pro-api","docs_url":"https://docs.aimlapi.com/api-references/image-models/flux/flux-pro-v1.1"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"flux-pro/v1.1-ultra","type":"image","info":{"name":"Flux pro 1.1 ultra","developer":"Flux","description":"Flux 1.1 Pro Ultra is a high-speed AI image generator optimized for diverse applications.","url":"https://aimlapi.com/models/flux-1-1-pro-ultra-api","docs_url":"https://docs.aimlapi.com/api-references/image-models/flux/flux-pro-v1.1-ultra"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"flux/dev","type":"image","info":{"name":"Flux dev","developer":"Flux","description":"FLUX.1 [dev] is an advanced image generation model by Black Forest Labs.","url":"https://aimlapi.com/models/flux-1-dev-api","docs_url":"https://docs.aimlapi.com/api-references/image-models/flux/flux-dev"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"flux/dev/image-to-image","type":"image","info":{"name":"Flux dev image-to-image","developer":"Flux","description":"FLUX.1 [dev] is an advanced image generation model by Black Forest Labs.","url":"https://aimlapi.com/models/flux-1-dev-api","docs_url":"https://docs.aimlapi.com/api-references/image-models/flux/flux-dev-image-to-image"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"flux/srpo","type":"image","info":{"name":"Flux SRPO","developer":"Flux","description":"FLUX.1 SRPO is an advanced image generation model optimized for superior quality and performance.","url":"","docs_url":""},"features":[],"endpoints":["/v1/images/generations"]},{"id":"flux/srpo/image-to-image","type":"image","info":{"name":"Flux SRPO Image-to-Image","developer":"Flux","description":"FLUX.1 SRPO image-to-image model for advanced image transformation and editing.","url":"","docs_url":""},"features":[],"endpoints":["/v1/images/generations"]},{"id":"bytedance/uso","type":"image","info":{"name":"USO Image-to-Image","developer":"ByteDance","description":"USO is an advanced image-to-image model for style transfer and image transformation.","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/bytedance"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"bytedance/seedream-v4-edit","type":"image","info":{"name":"SeedDream 4.0 Image-to-Image","developer":"ByteDance","description":"Edit images using ByteDance's SeedDream 4 model.","url":"","docs_url":""},"features":[],"endpoints":["/v1/images/generations"]},{"id":"bytedance/seedream-v4-text-to-image","type":"image","info":{"name":"SeedDream 4.0 Text to Image","developer":"ByteDance","description":"Generate images using ByteDance's SeedDream 4 text-to-image model.","url":"","docs_url":""},"features":[],"endpoints":["/v1/images/generations"]},{"id":"stable-diffusion-v3-medium","type":"image","info":{"name":"Stable diffusion v3 medium","developer":"Stability AI","description":"Enhanced Stable Diffusion 3 text-to-image model with improved text quality, efficiency and understanding","url":"https://aimlapi.com/models/stable-diffusion-3-api","docs_url":"https://docs.aimlapi.com/api-references/image-models/stability-ai/stable-diffusion-v3-medium"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"stable-diffusion-v35-large","type":"image","info":{"name":"Stable diffusion v3.5 large","developer":"Stability AI","description":"Stable Diffusion 3.5 Large enhances image generation with advanced architecture and diverse outputs.","url":"https://aimlapi.com/models/stable-diffusion-3-5-large-api","docs_url":"https://docs.aimlapi.com/api-references/image-models/stability-ai/stable-diffusion-v35-large"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"flux-realism","type":"image","info":{"name":"Flux realism","developer":"Flux","description":"FLUX Realism LoRA generates photorealistic images from text prompts, enhancing creative possibilities.","url":"https://aimlapi.com/models/flux-realism-lora-api","docs_url":"https://docs.aimlapi.com/api-references/image-models/flux/flux-realism"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"recraft-v3","type":"image","info":{"name":"Recraft v3","developer":"Recraft AI","description":"State-of-the-art AI image generator with superior text and vector capabilities.","url":"https://aimlapi.com/models/recraft-v3","docs_url":"https://docs.aimlapi.com/api-references/image-models/recraftai/recraft-v3"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"triposr","type":"image","info":{"name":"Triposr","developer":"Tripo AI","description":"TripoSR: Fast, transformer-based 3D reconstruction model from single RGB images.","url":"https://aimlapi.com/models/stable-tripo-sr-api","docs_url":"https://docs.aimlapi.com/api-references/3d-generating-models/stability-ai/triposr"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"flux/kontext-max/text-to-image","type":"image","info":{"name":"Flux Kontext Max","developer":"Flux","description":"FLUX.1 [kontext] [max] is a high-performance image generation model with maximum creative control.","url":"https://aimlapi.com/models/flux-1-kontext-max","docs_url":"https://docs.aimlapi.com/api-references/image-models/flux/flux-kontext-max-text-to-image"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"flux/kontext-pro/text-to-image","type":"image","info":{"name":"Flux Kontext Pro","developer":"Flux","description":"FLUX.1 [kontext] [pro] is a professional-grade image generation model with advanced features.","url":"https://aimlapi.com/models/flux-1-kontext-pro","docs_url":"https://docs.aimlapi.com/api-references/image-models/flux/flux-kontext-pro-text-to-image"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"flux/kontext-pro/image-to-image","type":"image","info":{"name":"Flux Kontext Pro Image-to-Image","developer":"Flux","description":"FLUX.1 [kontext] [pro] is a professional-grade image generation model with advanced features.","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/flux/flux-kontext-pro-image-to-image"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"flux/kontext-max/image-to-image","type":"image","info":{"name":"Flux Kontext Max Image-to-Image","developer":"Flux","description":"FLUX.1 [kontext] [max] is a high-performance image generation model with maximum creative control.","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/flux/flux-kontext-max-image-to-image"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"hunyuan/hunyuan-image-v3-text-to-image","type":"image","info":{"name":"Hunyuan Image V3","developer":"Tencent","description":"Hunyuan Image V3 is an advanced text-to-image model that generates high-quality images from text prompts.","url":"","docs_url":""},"features":[],"endpoints":["/v1/images/generations"]},{"id":"tencent/hunyuan-part","type":"image","info":{"name":"Tencent Hunyuan Part","developer":"Tencent","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/3d-generating-models/tencent/hunyuan-part"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"blackforestlabs/flux-2","type":"image","info":{"name":"Flux 2","developer":"Flux","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/flux/flux-2"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"blackforestlabs/flux-2-edit","type":"image","info":{"name":"Flux 2 Edit","developer":"Flux","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/flux/flux-2-edit"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"blackforestlabs/flux-2-lora","type":"image","info":{"name":"Flux 2 Lora","developer":"Flux","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/flux/flux-2-lora"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"blackforestlabs/flux-2-lora-edit","type":"image","info":{"name":"Flux 2 Lora Edit","developer":"Flux","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/flux/flux-2-lora-edit"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"blackforestlabs/flux-2-pro","type":"image","info":{"name":"Flux 2 Pro","developer":"Flux","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/flux/flux-2-pro"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"blackforestlabs/flux-2-pro-edit","type":"image","info":{"name":"Flux 2 Pro Edit","developer":"Flux","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/flux/flux-2-pro-edit"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"blackforestlabs/flux-2-max","type":"image","info":{"name":"Flux 2 Max","developer":"Flux","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v1/images/generations"]},{"id":"blackforestlabs/flux-2-max-edit","type":"image","info":{"name":"Flux 2 Max Edit","developer":"Flux","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v1/images/generations"]},{"id":"alibaba/z-image-turbo-lora","type":"image","info":{"name":"Z-Image Turbo LoRA","developer":"Alibaba Cloud","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/alibaba-cloud/z-image-turbo-lora"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"klingai/image-o1","type":"image","info":{"name":"Kling Image O1","developer":"Kling AI","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v1/images/generations"]},{"id":"bytedance/seedream-4-5","type":"image","info":{"developer":"ByteDance","description":"","name":"Seedream 4.5","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/bytedance/seedream-4-5"},"features":[],"endpoints":["v1/images/generations"]},{"id":"bytedance/seedream-5-0-lite-preview","type":"image","info":{"developer":"ByteDance","description":"","name":"Seedream 5.0 Lite Preview","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/bytedance/seedream-5-0-lite-preview"},"features":[],"endpoints":["v1/images/generations"]},{"id":"alibaba/qwen-image-edit","type":"image","info":{"developer":"Alibaba Cloud","description":"Qwen image editing model for modifying existing images based on text prompts","name":"Qwen Image Edit","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/alibaba-cloud/qwen-image-edit"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"alibaba/wan2.5-t2i-preview","type":"image","info":{"developer":"Alibaba Cloud","description":"Wan 2.5 preview text-to-image model with no single-side length limit.","name":"Wan 2.5 Preview","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/alibaba-cloud/wan2.5-t2i-preview"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"alibaba/wan2.2-t2i-plus","type":"image","info":{"developer":"Alibaba Cloud","description":"Wan 2.2 Professional Edition text-to-image model with improved stability and success rate compared to the 2.1 model.","name":"Wan 2.2 Plus","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/alibaba-cloud/wan2.2-t2i-plus"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"alibaba/wan2.2-t2i-flash","type":"image","info":{"developer":"Alibaba Cloud","description":"Wan 2.2 Flash Edition text-to-image model - 50% faster than the 2.1 model.","name":"Wan 2.2 Flash","url":"","docs_url":""},"features":[],"endpoints":["/v1/images/generations"]},{"id":"alibaba/wan-2-6-image","type":"image","info":{"developer":"Alibaba Cloud","description":"","name":"Wan 2.6 - Image generation","url":"","docs_url":""},"features":[],"endpoints":["/v1/images/generations"]},{"id":"alibaba/qwen-image","type":"image","info":{"name":"Qwen Image","developer":"Alibaba Cloud","description":"General-purpose image generation model with multiple artistic styles and strong complex text rendering.","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/alibaba-cloud/qwen-image"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"alibaba/z-image-turbo","type":"image","info":{"name":"Z-Image Turbo","developer":"Alibaba Cloud","description":"Lightweight image model for fast generation with Chinese and English text rendering.","url":"","docs_url":""},"features":[],"endpoints":["/v1/images/generations"]},{"id":"bytedance/seedream-3.0","type":"image","info":{"developer":"ByteDance","description":"","name":"Seedream 3","url":"","docs_url":"https://docs.aimlapi.com/api-references/image-models/bytedance/seedream-3.0"},"features":[],"endpoints":["/v1/images/generations"]},{"id":"reve/create-image","type":"image","info":{"name":"Reve Image Create","developer":"Reve AI","description":"Reve v1 is a high-performance image generation model with advanced creative features.","url":"","docs_url":""},"features":[],"endpoints":["/v1/images/generations"]},{"id":"reve/edit-image","type":"image","info":{"name":"Reve Image Edit","developer":"Reve AI","description":"Edit images based on a text description using Reve AI's image editing capabilities.","url":"","docs_url":""},"features":[],"endpoints":["/v1/images/generations"]},{"id":"reve/remix-edit-image","type":"image","info":{"name":"Reve Image Remix","developer":"Reve AI","description":"Create images from a text description and reference images using Reve AI's remix capabilities.","url":"","docs_url":""},"features":[],"endpoints":["/v1/images/generations"]},{"id":"topaz-labs/sharpen","type":"image","info":{"developer":"Topaz Labs","description":"Sharpening model for enhancing image clarity and reducing blur","name":"Sharpen","url":"","docs_url":""},"features":[],"endpoints":["/v1/images/generations"]},{"id":"topaz-labs/sharpen-gen","type":"image","info":{"developer":"Topaz Labs","description":"","name":"Sharpen Generative","url":"","docs_url":""},"features":[],"endpoints":["/v1/images/generations"]},{"id":"x-ai/grok-imagine-image","type":"image","info":{"developer":"X AI","description":"Grok Imagine Image is a lightweight text-to-image model by xAI optimized for fast and cost-efficient image generation.","name":"Grok Imagine Image","url":"","docs_url":""},"features":[],"endpoints":["/v1/images/generations"]},{"id":"x-ai/grok-imagine-image-pro","type":"image","info":{"developer":"X AI","description":"Grok Imagine Image Pro is an advanced text-to-image model by xAI designed for high-quality image generation with improved detail and prompt fidelity.","name":"Grok Imagine Image Pro","url":"","docs_url":""},"features":[],"endpoints":["/v1/images/generations"]},{"id":"magic/image-to-3d","type":"image","info":{"developer":"Magic Inc","description":"","name":"Magic image-to-3d","url":"","docs_url":""},"features":[],"endpoints":["/v1/images/generations"]},{"id":"text-embedding-3-small","type":"embedding","info":{"name":"Text embedding 3 small","developer":"Open AI","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/embedding-models/openai/text-embedding-3-small","contextLength":8000},"features":[],"endpoints":["/v1/embeddings"]},{"id":"text-embedding-3-large","type":"embedding","info":{"name":"Text embedding 3 large","developer":"Open AI","description":"High-performance embedding model with flexible dimensions and superior accuracy.","url":"https://aimlapi.com/models/text-embedding-3-large","docs_url":"https://docs.aimlapi.com/api-references/embedding-models/openai/text-embedding-3-large","contextLength":8000},"features":[],"endpoints":["/v1/embeddings"]},{"id":"text-embedding-ada-002","type":"embedding","info":{"name":"Text embedding ada 002","developer":"Open AI","description":"Reliable embedding model offering solid performance for various tasks.","url":"https://aimlapi.com/models/text-embedding-ada-002","docs_url":"https://docs.aimlapi.com/api-references/embedding-models/openai/text-embedding-ada-002","contextLength":8000},"features":[],"endpoints":["/v1/embeddings"]},{"id":"togethercomputer/m2-bert-80M-32k-retrieval","type":"embedding","info":{"name":"M2 bert 80M 32k retrieval","developer":"Together AI","description":"High-capacity AI model for comprehensive search and information retrieval. API for M2-BERT-Retrieval-32k.","url":"https://aimlapi.com/models/m2-bert-retrieval-32k","docs_url":"https://docs.aimlapi.com/api-references/embedding-models/together-ai/m2-bert-80m-retrieval","contextLength":32000},"features":[],"endpoints":["/v1/embeddings"]},{"id":"BAAI/bge-base-en-v1.5","type":"embedding","info":{"name":"Bge base en v1.5","developer":"BAAI","description":"AI model for generating precise language embeddings. API for BAAI-Bge-Base-1p5.","url":"https://aimlapi.com/models/baai-bge-base-1p5","docs_url":"https://docs.aimlapi.com/api-references/embedding-models/baai/bge-base-en","contextLength":null},"features":[],"endpoints":["/v1/embeddings"]},{"id":"voyage-large-2-instruct","type":"embedding","info":{"name":"Voyage large 2 instruct","developer":"Anthropic","description":"Voyage Large 2 Instruct excels in text embedding tasks.","url":"https://aimlapi.com/models/voyage-large-2-instruct-api","docs_url":"https://docs.aimlapi.com/api-references/embedding-models/anthropic/voyage-2","contextLength":16000},"features":[],"endpoints":["/v1/embeddings"]},{"id":"voyage-finance-2","type":"embedding","info":{"name":"Voyage finance 2","developer":"Anthropic","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/embedding-models/anthropic/voyage-finance-2","contextLength":32000},"features":[],"endpoints":["/v1/embeddings"]},{"id":"voyage-multilingual-2","type":"embedding","info":{"name":"Voyage multilingual 2","developer":"Anthropic","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/embedding-models/anthropic/voyage-multilingual-2","contextLength":32000},"features":[],"endpoints":["/v1/embeddings"]},{"id":"voyage-law-2","type":"embedding","info":{"name":"Voyage law 2","developer":"Anthropic","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/embedding-models/anthropic/voyage-law-2","contextLength":16000},"features":[],"endpoints":["/v1/embeddings"]},{"id":"voyage-code-2","type":"embedding","info":{"name":"Voyage code 2","developer":"Anthropic","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/embedding-models/anthropic/voyage-code-2","contextLength":16000},"features":[],"endpoints":["/v1/embeddings"]},{"id":"voyage-large-2","type":"embedding","info":{"name":"Voyage large 2","developer":"Anthropic","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/embedding-models/anthropic/voyage-large-2","contextLength":16000},"features":[],"endpoints":["/v1/embeddings"]},{"id":"voyage-2","type":"embedding","info":{"name":"Voyage 2","developer":"Anthropic","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/embedding-models/anthropic/voyage-2","contextLength":4000},"features":[],"endpoints":["/v1/embeddings"]},{"id":"text-multilingual-embedding-002","type":"embedding","info":{"name":"Text multilingual embedding 002","developer":"Google","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/embedding-models/google/text-multilingual-embedding-002","contextLength":2000},"features":[],"endpoints":["/v1/embeddings"]},{"id":"alibaba/qwen-text-embedding-v4","type":"embedding","info":{"name":"Qwen Text Embedding v4","developer":"Alibaba Cloud","description":"Advanced text embedding model from the Qwen3-Embedding series with support for custom dimensions (64-2048) and over 100 languages. Ideal for semantic search, clustering, and classification tasks.","url":"","docs_url":"https://docs.aimlapi.com/api-references/embedding-models/alibaba-cloud/qwen-text-embedding-v4","contextLength":8192},"features":[],"endpoints":["/v1/embeddings"]},{"id":"alibaba/qwen-text-embedding-v3","type":"embedding","info":{"name":"Qwen Text Embedding v3","developer":"Alibaba Cloud","description":"High-quality text embedding model with support for custom dimensions (512-1024) and over 50 languages. Suitable for semantic search, recommendation systems, and text analysis.","url":"","docs_url":"https://docs.aimlapi.com/api-references/embedding-models/alibaba-cloud/qwen-text-embedding-v3","contextLength":8192},"features":[],"endpoints":["/v1/embeddings"]},{"id":"#g1_nova-2-general","type":"stt","info":{"name":"#g1 nova 2 general","developer":"Deepgram","description":"Nova-2: Advanced, versatile ASR model for diverse transcription needs.","url":"https://aimlapi.com/models/deepgram-nova-2","docs_url":"https://docs.aimlapi.com/api-references/speech-models/speech-to-text/deepgram/nova-2"},"features":[],"endpoints":["/v1/stt/create","/v1/stt"]},{"id":"#g1_nova-2-meeting","type":"stt","info":{"name":"#g1 nova 2 meeting","developer":"Deepgram","description":"Nova-2: Advanced, versatile ASR model for diverse transcription needs.","url":"https://aimlapi.com/models/deepgram-nova-2","docs_url":"https://docs.aimlapi.com/api-references/speech-models/speech-to-text/deepgram/nova-2"},"features":[],"endpoints":["/v1/stt/create","/v1/stt"]},{"id":"#g1_nova-2-phonecall","type":"stt","info":{"name":"#g1 nova 2 phonecall","developer":"Deepgram","description":"Nova-2: Advanced, versatile ASR model for diverse transcription needs.","url":"https://aimlapi.com/models/deepgram-nova-2","docs_url":"https://docs.aimlapi.com/api-references/speech-models/speech-to-text/deepgram/nova-2"},"features":[],"endpoints":["/v1/stt/create","/v1/stt"]},{"id":"#g1_nova-2-voicemail","type":"stt","info":{"name":"#g1 nova 2 voicemail","developer":"Deepgram","description":"Nova-2: Advanced, versatile ASR model for diverse transcription needs.","url":"https://aimlapi.com/models/deepgram-nova-2","docs_url":"https://docs.aimlapi.com/api-references/speech-models/speech-to-text/deepgram/nova-2"},"features":[],"endpoints":["/v1/stt/create","/v1/stt"]},{"id":"#g1_nova-2-finance","type":"stt","info":{"name":"#g1 nova 2 finance","developer":"Deepgram","description":"Nova-2: Advanced, versatile ASR model for diverse transcription needs.","url":"https://aimlapi.com/models/deepgram-nova-2","docs_url":"https://docs.aimlapi.com/api-references/speech-models/speech-to-text/deepgram/nova-2"},"features":[],"endpoints":["/v1/stt/create","/v1/stt"]},{"id":"#g1_nova-2-conversationalai","type":"stt","info":{"name":"#g1 nova 2 conversationalai","developer":"Deepgram","description":"Nova-2: Advanced, versatile ASR model for diverse transcription needs.","url":"https://aimlapi.com/models/deepgram-nova-2","docs_url":"https://docs.aimlapi.com/api-references/speech-models/speech-to-text/deepgram/nova-2"},"features":[],"endpoints":["/v1/stt/create","/v1/stt"]},{"id":"#g1_nova-2-video","type":"stt","info":{"name":"#g1 nova 2 video","developer":"Deepgram","description":"Nova-2: Advanced, versatile ASR model for diverse transcription needs.","url":"https://aimlapi.com/models/deepgram-nova-2","docs_url":"https://docs.aimlapi.com/api-references/speech-models/speech-to-text/deepgram/nova-2"},"features":[],"endpoints":["/v1/stt/create","/v1/stt"]},{"id":"#g1_nova-2-medical","type":"stt","info":{"name":"#g1 nova 2 medical","developer":"Deepgram","description":"Nova-2: Advanced, versatile ASR model for diverse transcription needs.","url":"https://aimlapi.com/models/deepgram-nova-2","docs_url":"https://docs.aimlapi.com/api-references/speech-models/speech-to-text/deepgram/nova-2"},"features":[],"endpoints":["/v1/stt/create","/v1/stt"]},{"id":"#g1_nova-2-drivethru","type":"stt","info":{"name":"#g1 nova 2 drivethru","developer":"Deepgram","description":"Nova-2: Advanced, versatile ASR model for diverse transcription needs.","url":"https://aimlapi.com/models/deepgram-nova-2","docs_url":"https://docs.aimlapi.com/api-references/speech-models/speech-to-text/deepgram/nova-2"},"features":[],"endpoints":["/v1/stt/create","/v1/stt"]},{"id":"#g1_nova-2-automotive","type":"stt","info":{"name":"#g1 nova 2 automotive","developer":"Deepgram","description":"Nova-2: Advanced, versatile ASR model for diverse transcription needs.","url":"https://aimlapi.com/models/deepgram-nova-2","docs_url":"https://docs.aimlapi.com/api-references/speech-models/speech-to-text/deepgram/nova-2"},"features":[],"endpoints":["/v1/stt/create","/v1/stt"]},{"id":"#g1_whisper-large","type":"stt","info":{"name":"#g1 whisper large","developer":"Deepgram","description":"Whisper: Multilingual speech recognition model, robust, versatile, open-source.","url":"https://aimlapi.com/models/whisper","docs_url":"https://docs.aimlapi.com/api-references/speech-models/speech-to-text/deepgram/nova-2"},"features":[],"endpoints":["/v1/stt/create","/v1/stt"]},{"id":"#g1_whisper-medium","type":"stt","info":{"name":"#g1 whisper medium","developer":"Deepgram","description":"Whisper: Multilingual speech recognition model, robust, versatile, open-source.","url":"https://aimlapi.com/models/whisper","docs_url":"https://docs.aimlapi.com/api-references/speech-models/speech-to-text/deepgram/nova-2"},"features":[],"endpoints":["/v1/stt/create","/v1/stt"]},{"id":"#g1_whisper-small","type":"stt","info":{"name":"#g1 whisper small","developer":"Deepgram","description":"Whisper: Multilingual speech recognition model, robust, versatile, open-source.","url":"https://aimlapi.com/models/whisper","docs_url":"https://docs.aimlapi.com/api-references/speech-models/speech-to-text/deepgram/nova-2"},"features":[],"endpoints":["/v1/stt/create","/v1/stt"]},{"id":"#g1_whisper-tiny","type":"stt","info":{"name":"#g1 whisper tiny","developer":"Deepgram","description":"Whisper: Multilingual speech recognition model, robust, versatile, open-source.","url":"https://aimlapi.com/models/whisper","docs_url":"https://docs.aimlapi.com/api-references/speech-models/speech-to-text/deepgram/nova-2"},"features":[],"endpoints":["/v1/stt/create","/v1/stt"]},{"id":"#g1_whisper-base","type":"stt","info":{"name":"#g1 whisper base","developer":"Deepgram","description":"Whisper: Multilingual speech recognition model, robust, versatile, open-source.","url":"https://aimlapi.com/models/whisper","docs_url":"https://docs.aimlapi.com/api-references/speech-models/speech-to-text/deepgram/nova-2"},"features":[],"endpoints":["/v1/stt/create","/v1/stt"]},{"id":"#g1_aura-asteria-en","type":"tts","info":{"name":"#g1 aura asteria en","developer":"Deepgram","description":"Aura is a real-time TTS model with human-like voices for conversational AI applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-hera-en","type":"tts","info":{"name":"#g1 aura hera en","developer":"Deepgram","description":"Aura is a real-time TTS model with human-like voices for conversational AI applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-luna-en","type":"tts","info":{"name":"#g1 aura luna en","developer":"Deepgram","description":"Aura is a real-time TTS model with human-like voices for conversational AI applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-stella-en","type":"tts","info":{"name":"#g1 aura stella en","developer":"Deepgram","description":"Aura is a real-time TTS model with human-like voices for conversational AI applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-athena-en","type":"tts","info":{"name":"#g1 aura athena en","developer":"Deepgram","description":"Aura is a real-time TTS model with human-like voices for conversational AI applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-zeus-en","type":"tts","info":{"name":"#g1 aura zeus en","developer":"Deepgram","description":"Aura is a real-time TTS model with human-like voices for conversational AI applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-orion-en","type":"tts","info":{"name":"#g1 aura orion en","developer":"Deepgram","description":"Aura is a real-time TTS model with human-like voices for conversational AI applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-arcas-en","type":"tts","info":{"name":"#g1 aura arcas en","developer":"Deepgram","description":"Aura is a real-time TTS model with human-like voices for conversational AI applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-perseus-en","type":"tts","info":{"name":"#g1 aura perseus en","developer":"Deepgram","description":"Aura is a real-time TTS model with human-like voices for conversational AI applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-angus-en","type":"tts","info":{"name":"#g1 aura angus en","developer":"Deepgram","description":"Aura is a real-time TTS model with human-like voices for conversational AI applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-orpheus-en","type":"tts","info":{"name":"#g1 aura orpheus en","developer":"Deepgram","description":"Aura is a real-time TTS model with human-like voices for conversational AI applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-helios-en","type":"tts","info":{"name":"#g1 aura helios en","developer":"Deepgram","description":"Aura is a real-time TTS model with human-like voices for conversational AI applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-amalthea-en","type":"tts","info":{"name":"#g1 aura-2 amalthea en","developer":"Deepgram","description":"Aura-2 Filipino-accented female voice, engaging and cheerful. Perfect for casual chat applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-andromeda-en","type":"tts","info":{"name":"#g1 aura-2 andromeda en","developer":"Deepgram","description":"Aura-2 American female voice, casual and expressive. Ideal for customer service and IVR applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-apollo-en","type":"tts","info":{"name":"#g1 aura-2 apollo en","developer":"Deepgram","description":"Aura-2 American male voice, confident and comfortable. Great for casual chat applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-arcas-en","type":"tts","info":{"name":"#g1 aura-2 arcas en","developer":"Deepgram","description":"Aura-2 American male voice, natural and smooth. Perfect for customer service and casual chat.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-aries-en","type":"tts","info":{"name":"#g1 aura-2 aries en","developer":"Deepgram","description":"Aura-2 American male voice, warm and energetic. Ideal for casual chat applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-asteria-en","type":"tts","info":{"name":"#g1 aura-2 asteria en","developer":"Deepgram","description":"Aura-2 American female voice, clear and confident. Perfect for advertising applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-athena-en","type":"tts","info":{"name":"#g1 aura-2 athena en","developer":"Deepgram","description":"Aura-2 American mature female voice, calm and professional. Excellent for storytelling.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-atlas-en","type":"tts","info":{"name":"#g1 aura-2 atlas en","developer":"Deepgram","description":"Aura-2 American mature male voice, enthusiastic and confident. Great for advertising.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-aurora-en","type":"tts","info":{"name":"#g1 aura-2 aurora en","developer":"Deepgram","description":"Aura-2 American female voice, cheerful and expressive. Perfect for interview applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-callista-en","type":"tts","info":{"name":"#g1 aura-2 callista en","developer":"Deepgram","description":"Aura-2 American female voice, clear and energetic. Ideal for IVR applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-cora-en","type":"tts","info":{"name":"#g1 aura-2 cora en","developer":"Deepgram","description":"Aura-2 American female voice, smooth and melodic. Perfect for storytelling.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-cordelia-en","type":"tts","info":{"name":"#g1 aura-2 cordelia en","developer":"Deepgram","description":"Aura-2 American young female voice, approachable and warm. Great for storytelling.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-delia-en","type":"tts","info":{"name":"#g1 aura-2 delia en","developer":"Deepgram","description":"Aura-2 American young female voice, casual and friendly. Ideal for interview applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-draco-en","type":"tts","info":{"name":"#g1 aura-2 draco en","developer":"Deepgram","description":"Aura-2 British male voice with baritone, warm and trustworthy. Perfect for storytelling.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-electra-en","type":"tts","info":{"name":"#g1 aura-2 electra en","developer":"Deepgram","description":"Aura-2 American female voice, professional and engaging. Great for IVR, advertising, and customer service.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-harmonia-en","type":"tts","info":{"name":"#g1 aura-2 harmonia en","developer":"Deepgram","description":"Aura-2 American female voice, empathetic and calm. Perfect for customer service.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-helena-en","type":"tts","info":{"name":"#g1 aura-2 helena en","developer":"Deepgram","description":"Aura-2 American female voice, caring and natural with a raspy quality. Ideal for IVR and casual chat.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-hera-en","type":"tts","info":{"name":"#g1 aura-2 hera en","developer":"Deepgram","description":"Aura-2 American female voice, smooth and professional. Great for informative content.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-hermes-en","type":"tts","info":{"name":"#g1 aura-2 hermes en","developer":"Deepgram","description":"Aura-2 American male voice, expressive and engaging. Perfect for informative content.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-hyperion-en","type":"tts","info":{"name":"#g1 aura-2 hyperion en","developer":"Deepgram","description":"Aura-2 Australian male voice, caring and empathetic. Ideal for interview applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-iris-en","type":"tts","info":{"name":"#g1 aura-2 iris en","developer":"Deepgram","description":"Aura-2 American young female voice, cheerful and positive. Great for IVR, advertising, and customer service.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-janus-en","type":"tts","info":{"name":"#g1 aura-2 janus en","developer":"Deepgram","description":"Aura-2 American female voice with Southern accent, smooth and trustworthy. Perfect for storytelling.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-juno-en","type":"tts","info":{"name":"#g1 aura-2 juno en","developer":"Deepgram","description":"Aura-2 American female voice, natural and melodic with breathy quality. Ideal for interview applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-jupiter-en","type":"tts","info":{"name":"#g1 aura-2 jupiter en","developer":"Deepgram","description":"Aura-2 American male voice with baritone, expressive and knowledgeable. Great for informative content.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-luna-en","type":"tts","info":{"name":"#g1 aura-2 luna en","developer":"Deepgram","description":"Aura-2 American young female voice, friendly and natural. Perfect for IVR applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-mars-en","type":"tts","info":{"name":"#g1 aura-2 mars en","developer":"Deepgram","description":"Aura-2 American male voice with baritone, smooth and patient. Ideal for customer service.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-minerva-en","type":"tts","info":{"name":"#g1 aura-2 minerva en","developer":"Deepgram","description":"Aura-2 American female voice, positive and friendly. Great for storytelling.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-neptune-en","type":"tts","info":{"name":"#g1 aura-2 neptune en","developer":"Deepgram","description":"Aura-2 American male voice, professional and patient. Perfect for customer service.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-odysseus-en","type":"tts","info":{"name":"#g1 aura-2 odysseus en","developer":"Deepgram","description":"Aura-2 American male voice, calm and comfortable. Ideal for advertising applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-ophelia-en","type":"tts","info":{"name":"#g1 aura-2 ophelia en","developer":"Deepgram","description":"Aura-2 American female voice, expressive and enthusiastic. Great for interview applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-orion-en","type":"tts","info":{"name":"#g1 aura-2 orion en","developer":"Deepgram","description":"Aura-2 American male voice, approachable and calm. Perfect for informative content.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-orpheus-en","type":"tts","info":{"name":"#g1 aura-2 orpheus en","developer":"Deepgram","description":"Aura-2 American male voice, professional and confident. Great for customer service and storytelling.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-pandora-en","type":"tts","info":{"name":"#g1 aura-2 pandora en","developer":"Deepgram","description":"Aura-2 British female voice, smooth and melodic with breathy quality. Ideal for IVR and informative content.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-phoebe-en","type":"tts","info":{"name":"#g1 aura-2 phoebe en","developer":"Deepgram","description":"Aura-2 American female voice, energetic and warm. Perfect for customer service.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-pluto-en","type":"tts","info":{"name":"#g1 aura-2 pluto en","developer":"Deepgram","description":"Aura-2 American male voice with baritone, smooth and empathetic. Great for interview and storytelling.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-saturn-en","type":"tts","info":{"name":"#g1 aura-2 saturn en","developer":"Deepgram","description":"Aura-2 American male voice with baritone, knowledgeable and confident. Ideal for customer service.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-selene-en","type":"tts","info":{"name":"#g1 aura-2 selene en","developer":"Deepgram","description":"Aura-2 American female voice, expressive and engaging. Perfect for informative content.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-thalia-en","type":"tts","info":{"name":"#g1 aura-2 thalia en","developer":"Deepgram","description":"Aura-2 American female voice, clear and enthusiastic. Great for casual chat, customer service, and IVR.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-theia-en","type":"tts","info":{"name":"#g1 aura-2 theia en","developer":"Deepgram","description":"Aura-2 Australian female voice, expressive and sincere. Ideal for informative content.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-vesta-en","type":"tts","info":{"name":"#g1 aura-2 vesta en","developer":"Deepgram","description":"Aura-2 American female voice, natural and empathetic. Perfect for customer service, interview, and storytelling.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-zeus-en","type":"tts","info":{"name":"#g1 aura-2 zeus en","developer":"Deepgram","description":"Aura-2 American male voice, deep and trustworthy with smooth delivery. Ideal for various applications.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-celeste-es","type":"tts","info":{"name":"#g1 aura-2 celeste es","developer":"Deepgram","description":"Aura-2 Colombian Spanish young female voice, clear and enthusiastic. Perfect for casual chat, advertising, and IVR.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-estrella-es","type":"tts","info":{"name":"#g1 aura-2 estrella es","developer":"Deepgram","description":"Aura-2 Mexican Spanish mature female voice, approachable and calm. Great for casual chat and interview.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"#g1_aura-2-nestor-es","type":"tts","info":{"name":"#g1 aura-2 nestor es","developer":"Deepgram","description":"Aura-2 Peninsular Spanish male voice, calm and professional. Ideal for casual chat and customer service.","url":"https://aimlapi.com/models/aura","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/deepgram/aura"},"features":[],"endpoints":["/v1/tts"]},{"id":"aai/slam-1","type":"stt","info":{"name":"Slam 1","developer":"AssemblyAI","description":"AssemblyAI's highly customizable speech-to-text model for English-only transcription.","url":"","docs_url":"https://docs.aimlapi.com/api-references/speech-models/speech-to-text/assembly-ai/slam-1"},"features":[],"endpoints":["/v1/stt/create","/v1/stt"]},{"id":"aai/universal","type":"stt","info":{"name":"Universal","developer":"AssemblyAI","description":"AssemblyAI's fastest and most robust speech-to-text model with support for a broad range of languages.","url":"","docs_url":"https://docs.aimlapi.com/api-references/speech-models/speech-to-text/assembly-ai/universal"},"features":[],"endpoints":["/v1/stt/create","/v1/stt"]},{"id":"openai/gpt-4o-transcribe","type":"stt","info":{"name":"GPT-4o Transcribe","developer":"Open AI","description":"Advanced speech-to-text model using GPT-4o for improved accuracy and language recognition. 16,000 context window, 2,000 max output tokens.","url":"","docs_url":""},"features":[],"endpoints":["/v1/stt/create","/v1/stt"]},{"id":"openai/gpt-4o-mini-transcribe","type":"stt","info":{"name":"GPT-4o Mini Transcribe","developer":"Open AI","description":"Efficient speech-to-text model using GPT-4o mini with improved accuracy compared to Whisper. 16,000 context window, 2,000 max output tokens.","url":"","docs_url":""},"features":[],"endpoints":["/v1/stt/create","/v1/stt"]},{"id":"openai/gpt-4o-mini-tts","type":"tts","info":{"name":"GPT-4o mini TTS","developer":"Open AI","description":"GPT-4o mini TTS is a text-to-speech model built on GPT-4o mini, a fast and powerful language model. Use it to convert text to natural sounding spoken text with customizable voice characteristics through instructions. The maximum number of input tokens is 2000.","url":"","docs_url":""},"features":[],"endpoints":["/v1/tts"]},{"id":"openai/tts-1","type":"tts","info":{"name":"TTS-1","developer":"Open AI","description":"TTS-1 is a real-time text-to-speech model optimized for speed. Provides lower latency but at a lower quality than TTS-1 HD.","url":"","docs_url":""},"features":[],"endpoints":["/v1/tts"]},{"id":"openai/tts-1-hd","type":"tts","info":{"name":"TTS-1 HD","developer":"Open AI","description":"TTS-1 HD is a high-quality text-to-speech model that provides superior audio quality compared to TTS-1.","url":"","docs_url":""},"features":[],"endpoints":["/v1/tts"]},{"id":"elevenlabs/eleven_multilingual_v2","type":"tts","info":{"name":"Eleven Multilingual v2","developer":"Elevenlabs","description":"State-of-the-art multilingual text-to-speech model supporting 29 languages with rich emotional range.","url":"","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/elevenlabs/eleven_multilingual_v2"},"features":[],"endpoints":["/v1/tts"]},{"id":"elevenlabs/eleven_turbo_v2_5","type":"tts","info":{"name":"Eleven Turbo v2.5","developer":"Elevenlabs","description":"Low-latency text-to-speech model optimized for real-time applications and rapid response.","url":"","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/elevenlabs/eleven_turbo_v2_5"},"features":[],"endpoints":["/v1/tts"]},{"id":"elevenlabs/v3_alpha","type":"tts","info":{"name":"Eleven v3 Alpha","developer":"Elevenlabs","description":"The next generation of ElevenLabs speech synthesis featuring enhanced naturalness and prosody.","url":"","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech/elevenlabs"},"features":[],"endpoints":["/v1/tts"]},{"id":"minimax/speech-2.5-turbo-preview","type":"tts","info":{"name":"Speech 2.5 Turbo Preview","developer":"Minimax AI","description":"The brand new Turbo model. Ultimate Value, 40 Languages - supports synchronous text-to-speech audio generation with 100+ existing voices and multiple audio specifications.","url":"","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech"},"features":[],"endpoints":["/v1/tts"]},{"id":"minimax/speech-2.5-hd-preview","type":"tts","info":{"name":"Speech 2.5 HD Preview","developer":"Minimax AI","description":"The brand new HD model. Ultimate Similarity, Ultra-High Quality - supports synchronous text-to-speech audio generation with superior rhythm and stability.","url":"","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech"},"features":[],"endpoints":["/v1/tts"]},{"id":"minimax/speech-2.6-hd","type":"tts","info":{"name":"Speech 2.6 HD","developer":"Minimax AI","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v1/tts"]},{"id":"minimax/speech-2.6-turbo","type":"tts","info":{"name":"Speech 2.6 Turbo","developer":"Minimax AI","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v1/tts"]},{"id":"microsoft/vibevoice-7b","type":"tts","info":{"name":"VibeVoice 7B","developer":"Microsoft","description":"VibeVoice 7B is a high-quality text-to-speech model that supports multi-speaker dialogue generation with natural-sounding voices.","url":"","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech"},"features":[],"endpoints":["/v1/tts"]},{"id":"microsoft/vibevoice-1.5b","type":"tts","info":{"name":"VibeVoice 1.5B","developer":"Microsoft","description":"VibeVoice 1.5B is an efficient text-to-speech model that supports multi-speaker dialogue generation with natural-sounding voices.","url":"","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech"},"features":[],"endpoints":["/v1/tts"]},{"id":"alibaba/qwen3-tts-flash","type":"tts","info":{"name":"Qwen3-TTS Flash","developer":"Alibaba Cloud","description":"Qwen3-TTS offers 17 voices and supports multiple languages and dialects including Chinese. The system can adapt its tone and smoothly process complex text.","url":"","docs_url":"https://docs.aimlapi.com/api-references/speech-models/text-to-speech"},"features":[],"endpoints":["/v1/tts"]},{"id":"hume/octave-2","type":"tts","info":{"name":"Octave 2","developer":"Hume AI","description":"Hume Octave 2 is a high-quality text-to-speech model featuring expressive emotional prosody for natural sounding interactions.","url":"","docs_url":""},"features":[],"endpoints":["/v1/tts"]},{"id":"inworld/tts-1","type":"tts","info":{"name":"Inworld TTS 1","developer":"Inworld AI","description":"High-quality text-to-speech model with natural-sounding voices","url":"","docs_url":""},"features":[],"endpoints":["/v1/tts"]},{"id":"inworld/tts-1-max","type":"tts","info":{"name":"Inworld TTS 1 Max","developer":"Inworld AI","description":"Premium text-to-speech model with ultra-realistic speech generation","url":"","docs_url":""},"features":[],"endpoints":["/v1/tts"]},{"id":"inworld/tts-1-5-max","type":"tts","info":{"name":"Inworld TTS 1.5 Max","developer":"Inworld AI","description":"Maximum-quality Inworld TTS 1.5 model specialized for expressive and stable vocal production.","url":"","docs_url":""},"features":[],"endpoints":["/v1/tts"]},{"id":"inworld/tts-1-5-mini","type":"tts","info":{"name":"Inworld TTS 1.5 Mini","developer":"Inworld AI","description":"Efficient Inworld TTS 1.5 variant optimized for rapid synthesis and real-time response.","url":"","docs_url":""},"features":[],"endpoints":["/v1/tts"]},{"id":"gpt-3.5-turbo-instruct","type":"language-completion","info":{"name":"Gpt 3.5 turbo instruct","developer":"Open AI","description":"OpenAI GPT-3.5-Turbo-Instruct: Streamlined AI for precise, instruction-driven interactions.","contextLength":4000,"url":"https://aimlapi.com/models/chat-gpt-3-5-turbo-instruct","docs_url":"https://docs.aimlapi.com/capabilities/completion-or-chat-models"},"features":["openai/completion"],"endpoints":["/v1/completions"]},{"id":"openai/sora-2-t2v","type":"video","info":{"name":"Sora 2 Text to Video","developer":"Open AI","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"openai/sora-2-i2v","type":"video","info":{"name":"Sora 2 Image to Video","developer":"Open AI","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"openai/sora-2-pro-i2v","type":"video","info":{"name":"Sora 2 Pro Image to Video","developer":"Open AI","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"openai/sora-2-pro-t2v","type":"video","info":{"name":"Sora 2 Pro Text to Video","developer":"Open AI","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"video-01","type":"video","info":{"name":"Video 01","developer":"Minimax AI","description":"Minimax Video-01 generates high-quality videos from text prompts or image using advanced AI techniques for diverse creative applications.","url":"https://aimlapi.com/models/minimax-video-01-api","docs_url":"https://docs.aimlapi.com/api-references/video-models/minimax/video-01"},"features":[],"endpoints":["/v2/generate/video/minimax/generation","/v2/video/generations"]},{"id":"video-01-live2d","type":"video","info":{"name":"Video 01 live2d","developer":"Minimax AI","description":"Minimax Video-01 generates high-quality videos from text prompts or image using advanced AI techniques for diverse creative applications.","url":"https://aimlapi.com/models/minimax-video-01-api","docs_url":"https://docs.aimlapi.com/api-references/video-models/minimax/video-01-live2d"},"features":[],"endpoints":["/v2/generate/video/minimax/generation","/v2/video/generations"]},{"id":"minimax/hailuo-02","type":"video","info":{"name":"Hailuo 02","developer":"Minimax AI","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/minimax/hailuo-02"},"features":[],"endpoints":["/v2/generate/video/minimax/generation","/v2/video/generations"]},{"id":"minimax/hailuo-2.3","type":"video","info":{"name":"Hailuo 2.3","developer":"Minimax AI","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"minimax/hailuo-2.3-fast","type":"video","info":{"name":"Hailuo 2.3 Fast","developer":"Minimax AI","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"kling-video/v1.6/standard/text-to-video","type":"video","info":{"name":"Kling 1.6 Standard text-to-video","developer":"Kling AI","description":"Kling 1.6 enhances video generation with improved prompt adherence and dynamic visuals for diverse creative applications.","url":"https://aimlapi.com/models/kling-1-6-standard","docs_url":"https://docs.aimlapi.com/api-references/video-models/kling-ai/v1.6-standard-text-to-video"},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"kling-video/v1.6/standard/image-to-video","type":"video","info":{"name":"Kling 1.6 Standard image-to-video","developer":"Kling AI","description":"Kling 1.6 enhances video generation with improved prompt adherence and dynamic visuals for diverse creative applications.","url":"https://aimlapi.com/models/kling-1-6-standard","docs_url":"https://docs.aimlapi.com/api-references/video-models/kling-ai/v1.6-standart-image-to-video"},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"pixverse/v5/transition","type":"video","info":{"name":"Pixverse v5 Transition","developer":"Pixverse","description":"Create seamless transitions between images using PixVerse v5 with advanced AI techniques.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models"},"features":[],"endpoints":["/v2/generate/video/pixverse/generation","/v2/video/generations"]},{"id":"pixverse/v5/image-to-video","type":"video","info":{"name":"Pixverse v5 Image to Video","developer":"Pixverse","description":"Generate high-quality videos from images using PixVerse v5 with advanced AI video generation.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models"},"features":[],"endpoints":["/v2/generate/video/pixverse/generation","/v2/video/generations"]},{"id":"pixverse/v5/text-to-video","type":"video","info":{"name":"Pixverse v5 Text to Video","developer":"Pixverse","description":"Create dynamic videos from text prompts using PixVerse v5 advanced text-to-video AI technology.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models"},"features":[],"endpoints":["/v2/generate/video/pixverse/generation","/v2/video/generations"]},{"id":"pixverse/lip-sync","type":"video","info":{"name":"Pixverse Lip Sync","developer":"Pixverse","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/generate/video/pixverse/generation","/v2/video/generations"]},{"id":"pixverse/v5-5-text-to-video","type":"video","info":{"name":"Pixverse v5.5 Text to Video","developer":"Pixverse","description":"Create dynamic videos from text prompts using PixVerse v5.5 with audio generation, multi-clip support, and prompt optimization.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models"},"features":[],"endpoints":["/v2/generate/video/pixverse/generation","/v2/video/generations"]},{"id":"pixverse/v5-5-image-to-video","type":"video","info":{"name":"Pixverse v5.5 Image to Video","developer":"Pixverse","description":"Generate high-quality videos from images using PixVerse v5.5 with audio generation, multi-clip support, and prompt optimization.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models"},"features":[],"endpoints":["/v2/generate/video/pixverse/generation","/v2/video/generations"]},{"id":"google/veo-3.1-t2v","type":"video","info":{"name":"Veo3.1 Text-To-Video","description":"","developer":"Google","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"google/veo-3.1-i2v","type":"video","info":{"name":"Veo3.1 Image-To-Video","description":"","developer":"Google","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"google/veo-3.1-first-last-image-to-video","type":"video","info":{"name":"Veo3.1 First-Last-Frame-Image-To-Video","description":"","developer":"Google","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"google/veo-3.1-reference-to-video","type":"video","info":{"name":"Veo3.1 Reference-To-Video","description":"","developer":"Google","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"google/veo-3.1-t2v-fast","type":"video","info":{"name":"Veo3.1 Text-To-Video Fast","description":"","developer":"Google","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"google/veo-3.1-i2v-fast","type":"video","info":{"name":"Veo3.1 Image-To-Video Fast","description":"","developer":"Google","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"google/veo-3.1-first-last-image-to-video-fast","type":"video","info":{"name":"Veo3.1 First-Last-Frame-Image-To-Video Fast","description":"","developer":"Google","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"google/veo3-1-extend-video","type":"video","info":{"name":"Veo 3.1 Extend Video","description":"Veo 3.1 Extend Video is a video extension (video-to-video) model for continuing an existing clip, with optional audio generation.","developer":"Google","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"google/veo3-1-fast-extend-video","type":"video","info":{"name":"Veo 3.1 Fast Extend Video","description":"Veo 3.1 Fast Extend Video provides the same extend workflow at lower cost and faster iteration, also supporting audio on/off modes.","developer":"Google","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"klingai/v2.5-turbo/pro/image-to-video","type":"video","info":{"name":"Kling 2.5 Turbo Pro image-to-video","developer":"Kling AI","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"klingai/v2.5-turbo/pro/text-to-video","type":"video","info":{"name":"Kling 2.5 Turbo Pro text-to-video","developer":"Kling AI","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"bytedance/omnihuman","type":"video","info":{"name":"OmniHuman Image to Video","developer":"ByteDance","description":"Generate incredibly accurate videos of human beings speaking a dialogue using ByteDance's OmniHuman model.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/bytedance/omnihuman"},"features":[],"endpoints":["/v2/video/generations"]},{"id":"bytedance/omnihuman/v1.5","type":"video","info":{"name":"OmniHuman v1.5","developer":"ByteDance","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/bytedance/omnihuman-1.5"},"features":[],"endpoints":["/v2/video/generations"]},{"id":"veed/fabric-1.0","type":"video","info":{"name":"VEED Fabric-1.0","developer":"VEED","description":"Generate high-quality videos from image and audio inputs using VEED Fabric-1.0 model.","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"veed/fabric-1.0-fast","type":"video","info":{"name":"VEED Fabric-1.0 Fast","developer":"VEED","description":"Generate high-quality videos from image and audio inputs with faster processing using VEED Fabric-1.0 Fast model.","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"tencent/hunyuan-video-foley","type":"video","info":{"name":"Tencent Hunyuan Video Foley","developer":"Tencent","description":"Generate foley audio for videos using Tencent Hunyuan Video Foley model. Adds realistic sound effects synchronized with video content.","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"alibaba/wan2.2-vace-fun-a14b-depth","type":"video","info":{"name":"Wan 2.2 Vace Depth","developer":"Alibaba Cloud","description":"Generate videos from depth maps using Alibaba WAN-2.2-VACE model.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/alibaba-cloud/wan2.2-vace-fun-a14b-depth-image-to-video"},"features":[],"endpoints":["/v2/video/generations"]},{"id":"alibaba/wan2.2-vace-fun-a14b-pose","type":"video","info":{"name":"Wan 2.2 Vace Pose","developer":"Alibaba Cloud","description":"Generate pose-guided videos using Alibaba WAN-2.2-VACE model.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/alibaba-cloud/wan2.2-vace-fun-a14b-pose-image-to-video"},"features":[],"endpoints":["/v2/video/generations"]},{"id":"alibaba/wan2.2-vace-fun-a14b-inpainting","type":"video","info":{"name":"Wan 2.2 Vace Inpainting","developer":"Alibaba Cloud","description":"Generate inpainted videos using Alibaba WAN-2.2-VACE model.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/alibaba-cloud/wan2.2-vace-fun-a14b-inpainting-image-to-video"},"features":[],"endpoints":["/v2/video/generations"]},{"id":"alibaba/wan2.2-vace-fun-a14b-outpainting","type":"video","info":{"name":"Wan 2.2 Vace Outpainting","developer":"Alibaba Cloud","description":"Generate outpainted videos using Alibaba WAN-2.2-VACE model.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/alibaba-cloud/wan2.2-vace-fun-a14b-outpainting-image-to-video"},"features":[],"endpoints":["/v2/video/generations"]},{"id":"alibaba/wan2.2-vace-fun-a14b-reframe","type":"video","info":{"name":"Wan 2.2 Vace Reframe","developer":"Alibaba Cloud","description":"Generate reframed videos using Alibaba WAN-2.2-VACE model.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/alibaba-cloud/wan2.2-vace-fun-a14b-reframe-image-to-video"},"features":[],"endpoints":["/v2/video/generations"]},{"id":"alibaba/wan2.2-14b-animate-move","type":"video","info":{"name":"Wan 2.2 14B Animate Move","developer":"Alibaba Cloud","description":"Generate animated videos from video and image using Alibaba WAN-2.2-14B move model.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/alibaba-cloud/wan-2.2-14b-animate-move-image-to-video"},"features":[],"endpoints":["/v2/video/generations"]},{"id":"alibaba/wan2.2-14b-animate-replace","type":"video","info":{"name":"Wan 2.2 14B Animate Replace","developer":"Alibaba Cloud","description":"Generate animated videos from video and image using Alibaba WAN-2.2-14B replace model.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/alibaba-cloud/wan-2.2-14b-animate-replace-image-to-video"},"features":[],"endpoints":["/v2/video/generations"]},{"id":"sber-ai/kandinsky5-t2v","type":"video","info":{"name":"Kandinsky5 Text-to-Video","developer":"Sber AI","description":"Kandinsky5 text-to-video model generates high-quality videos from text prompts. $0.10 for 5s, $0.20 for 10s.","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"sber-ai/kandinsky5-distill-t2v","type":"video","info":{"name":"Kandinsky5 Text-to-Video Distill","developer":"Sber AI","description":"Kandinsky5 distilled text-to-video model for faster generation. $0.05 for 5s, $0.10 for 10s.","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"krea/krea-wan-14b/text-to-video","type":"video","info":{"name":"Krea WAN 14B Text-to-Video","developer":"Krea","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"krea/krea-wan-14b/video-to-video","type":"video","info":{"name":"Krea WAN 14B Video-to-Video","developer":"Krea","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"ltxv/ltxv-2","type":"video","info":{"name":"LTXV 2","developer":"LTXV","description":"Generate high-quality videos from text prompts or images using LTXV 2 model with support for multiple resolutions and frame rates.","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"ltxv/ltxv-2-fast","type":"video","info":{"name":"LTXV 2 Fast","developer":"LTXV","description":"Generate high-quality videos from text prompts or images using LTXV 2 fast model with support for multiple resolutions and frame rates at faster speeds.","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"klingai/avatar-standard","type":"video","info":{"name":"Kling Avatar Standard","developer":"Kling AI","description":"Kling Avatar creates dynamic videos from avatar reference images with audio synchronization using standard mode for cost-effective generation.","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"klingai/avatar-pro","type":"video","info":{"name":"Kling Avatar Professional","developer":"Kling AI","description":"Kling Avatar creates dynamic videos from avatar reference images with audio synchronization using professional mode for higher quality and longer duration.","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"longcat/distilled/480p","type":"video","info":{"name":"LongCat Video Distilled 480p","developer":"LongCat","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"longcat/distilled/720p","type":"video","info":{"name":"LongCat Video Distilled 720p","developer":"LongCat","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"klingai/video-o1-image-to-video","type":"video","info":{"name":"Kling Video O1 - Image to Video","developer":"Kling AI","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"klingai/video-o1-reference-to-video","type":"video","info":{"name":"Kling Video O1 - Reference to Video","developer":"Kling AI","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"klingai/video-o1-video-to-video-edit","type":"video","info":{"name":"Kling Video O1 - Video to Video Edit","developer":"Kling AI","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"klingai/video-o1-video-to-video-reference","type":"video","info":{"name":"Kling Video O1 - Video to Video Reference","developer":"Kling AI","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"klingai/video-v2-6-pro-image-to-video","type":"video","info":{"name":"Kling 2.6 Pro image-to-video","developer":"Kling AI","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"klingai/video-v2-6-pro-text-to-video","type":"video","info":{"name":"Kling 2.6 Pro text-to-video","developer":"Kling AI","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"klingai/video-v3-standard-text-to-video","type":"video","info":{"name":"Kling Video v3 Standard — Text to Video","description":"","developer":"Kling AI","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"klingai/video-v3-standard-image-to-video","type":"video","info":{"name":"Kling Video v3 Standard — Image to Video","description":"","developer":"Kling AI","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"klingai/video-v3-pro-text-to-video","type":"video","info":{"name":"Kling Video v3 Pro — Text to Video","description":"","developer":"Kling AI","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"klingai/video-v3-pro-image-to-video","type":"video","info":{"name":"Kling Video v3 Pro — Image to Video","description":"","developer":"Kling AI","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"gen3a_turbo","type":"video","info":{"developer":"Runway ML","description":"Runway Gen-3 Turbo generates high-quality videos from images quickly and efficiently, enhancing creative workflows","name":"Gen 3a Turbo","url":"https://aimlapi.com/models/runway-gen-3-turbo","docs_url":"https://docs.aimlapi.com/api-references/video-models/runway/gen3a_turbo"},"features":[],"endpoints":["/v2/generate/video/runway/generation","/v2/video/generations"]},{"id":"runway/gen4_turbo","type":"video","info":{"developer":"Runway ML","description":"Runway Gen-4 Turbo generates high-quality videos from images and text with fast processing and improved motion consistency.","name":"Gen 4 Turbo","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/runway/gen4_turbo"},"features":[],"endpoints":["/v2/generate/video/runway/generation","/v2/video/generations"]},{"id":"runway/act_two","type":"video","info":{"developer":"Runway ML","description":"Runway Act Two creates expressive, full-body performance videos by transferring motion from a driving video to a subject.","name":"Runway Act Two","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/runway/act_two"},"features":[],"endpoints":["/v2/generate/video/runway/generation","/v2/video/generations"]},{"id":"runway/gen4_aleph","type":"video","info":{"developer":"Runway ML","description":"Runway Gen-4 Aleph is an advanced video generation model with enhanced realism and creative control.","name":"Gen 4 Aleph","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/runway/gen4_aleph"},"features":[],"endpoints":["/v2/generate/video/runway/generation","/v2/video/generations"]},{"id":"bytedance/seedance-1-0-lite-t2v","type":"video","info":{"developer":"ByteDance","description":"","name":"Seedance 1.0 lite Text to Video","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/bytedance/seedance-1.0-lite-text-to-video"},"features":[],"endpoints":["/v2/generate/video/bytedance/generation","/v2/video/generations"]},{"id":"bytedance/seedance-1-0-lite-i2v","type":"video","info":{"developer":"ByteDance","description":"","name":"Seedance 1.0 lite Image to Video","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/bytedance/seedance-1.0-lite-image-to-video"},"features":[],"endpoints":["/v2/generate/video/bytedance/generation","/v2/video/generations"]},{"id":"bytedance/seedance-1-0-pro-t2v","type":"video","info":{"developer":"ByteDance","description":"","name":"Seedance 1.0 Pro Text to Video","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/bytedance/seedance-1.0-pro-text-to-video"},"features":[],"endpoints":["/v2/generate/video/bytedance/generation","/v2/video/generations"]},{"id":"bytedance/seedance-1-0-pro-i2v","type":"video","info":{"developer":"ByteDance","description":"","name":"Seedance 1.0 Pro Image to Video","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/bytedance/seedance-1.0-pro-image-to-video"},"features":[],"endpoints":["/v2/generate/video/bytedance/generation","/v2/video/generations"]},{"id":"bytedance/seedance-1-0-pro-fast","type":"video","info":{"developer":"ByteDance","description":"","name":"Seedance 1.0 Pro Fast","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"alibaba/wan2.2-i2v-plus","type":"video","info":{"developer":"Alibaba Cloud","description":"","name":"Wan 2.2 Plus Image to Video","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/alibaba-cloud"},"features":[],"endpoints":["/v2/generate/video/alibaba/generation","/v2/video/generations"]},{"id":"alibaba/wan2.2-t2v-plus","type":"video","info":{"developer":"Alibaba Cloud","description":"","name":"Wan 2.2 Plus Text to Video","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/alibaba-cloud/wan-2.2-plus-text-to-video"},"features":[],"endpoints":["/v2/generate/video/alibaba/generation","/v2/video/generations"]},{"id":"alibaba/wan2.1-t2v-turbo","type":"video","info":{"developer":"Alibaba Cloud","description":"","name":"Wan 2.1 Turbo Text to Video","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/alibaba-cloud/wan-2.1-turbo-text-to-video"},"features":[],"endpoints":["/v2/generate/video/alibaba/generation","/v2/video/generations"]},{"id":"alibaba/wan2.1-t2v-plus","type":"video","info":{"developer":"Alibaba Cloud","description":"","name":"Wan 2.1 Plus Text to Video","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/alibaba-cloud/wan-2.1-plus-text-to-video"},"features":[],"endpoints":["/v2/generate/video/alibaba/generation","/v2/video/generations"]},{"id":"alibaba/wan2.5-t2v-preview","type":"video","info":{"developer":"Alibaba Cloud","description":"Generate high-quality videos from text prompts using Alibaba Cloud's WAN 2.5 text-to-video model with support for 480p, 720p, and 1080p resolutions.","name":"Wan 2.5 Preview Text-to-Video","url":"","docs_url":""},"features":[],"endpoints":["/v2/generate/video/alibaba/generation","/v2/video/generations"]},{"id":"alibaba/wan2.5-i2v-preview","type":"video","info":{"developer":"Alibaba Cloud","description":"Generate high-quality videos from images using Alibaba Cloud's WAN 2.5 image-to-video model with support for 480p, 720p, and 1080p resolutions.","name":"Wan 2.5 Preview Image-to-Video","url":"","docs_url":""},"features":[],"endpoints":["/v2/generate/video/alibaba/generation","/v2/video/generations"]},{"id":"alibaba/wan-2-6-t2v","type":"video","info":{"developer":"Alibaba Cloud","description":"","name":"Wan 2.6 - Text-to-Video","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"alibaba/wan-2-6-i2v","type":"video","info":{"developer":"Alibaba Cloud","description":"","name":"Wan 2.6 - Image-to-Video","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"alibaba/wan-2-6-image-to-video-flash","type":"video","info":{"developer":"Alibaba Cloud","description":"Wan 2.6 Image-to-Video Flash - Fast image-to-video model with optional synchronized audio track, supporting up to 15 seconds generation","name":"Wan 2.6 Image-to-Video Flash","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"alibaba/wan-2-6-r2v","type":"video","info":{"developer":"Alibaba Cloud","description":"","name":"Wan 2.6 - Reference-to-Video","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"veo2/image-to-video","type":"video","info":{"name":"Veo2 Image-to-Video","description":"Veo2 Image-to-Video: Google's AI transforming still images into dynamic videos","developer":"Google","url":"https://aimlapi.com/models/veo-2-image-to-video-api","docs_url":"https://docs.aimlapi.com/api-references/video-models/google/veo2-image-to-video"},"features":[],"endpoints":["/v2/generate/video/google/generation","/v2/video/generations"]},{"id":"veo2","type":"video","info":{"name":"Veo2 Text-to-Video","description":"Veo2: Google's advanced text-to-video model","developer":"Google","url":"https://aimlapi.com/models/veo-2-text-to-video-api","docs_url":"https://docs.aimlapi.com/api-references/video-models/google/veo2-text-to-video"},"features":[],"endpoints":["/v2/generate/video/google/generation","/v2/video/generations"]},{"id":"google/veo3","type":"video","info":{"name":"Veo3 Text-to-Video","description":"","developer":"Google","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/google/veo3-text-to-video"},"features":[],"endpoints":["/v2/generate/video/google/generation","/v2/video/generations"]},{"id":"google/veo-3.0-i2v","type":"video","info":{"name":"Veo3 Image-to-Video","description":"","developer":"Google","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/google/veo-3-image-to-video"},"features":[],"endpoints":["/v2/generate/video/google/generation","/v2/video/generations"]},{"id":"google/veo-3.0-fast","type":"video","info":{"name":"Veo3 Text-To-Video Fast","description":"","developer":"Google","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/google/veo-3-fast-text-to-video"},"features":[],"endpoints":["/v2/generate/video/google/generation","/v2/video/generations"]},{"id":"google/veo-3.0-i2v-fast","type":"video","info":{"name":"Veo3 Image-to-Video Fast","description":"","developer":"Google","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/google/veo-3-fast-image-to-video"},"features":[],"endpoints":["/v2/generate/video/google/generation","/v2/video/generations"]},{"id":"kling-video/v1/standard/image-to-video","type":"video","info":{"name":"Kling 1.0 Standard image-to-video","developer":"Kling AI","description":"Kling AI Image-to-Video generates high-quality videos from images using advanced AI techniques for dynamic visual storytelling.","url":"https://aimlapi.com/models/kling-ai-image-to-video","docs_url":"https://docs.aimlapi.com/api-references/video-models/kling-ai/v1-standard-image-to-video"},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"kling-video/v1/standard/text-to-video","type":"video","info":{"name":"Kling 1.0 Standard text-to-video","developer":"Kling AI","description":"Kling AI Text-to-Video generates high-quality videos from text prompts using advanced AI techniques for dynamic visual storytelling.","url":"https://aimlapi.com/models/kling-ai-text-to-video-api","docs_url":"https://docs.aimlapi.com/api-references/video-models/kling-ai/v1-standard-text-to-video"},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"kling-video/v1/pro/image-to-video","type":"video","info":{"name":"Kling 1.0 Pro image-to-video","developer":"Kling AI","description":"Kling AI Image-to-Video generates high-quality videos from images using advanced AI techniques for dynamic visual storytelling.","url":"https://aimlapi.com/models/kling-ai-image-to-video","docs_url":"https://docs.aimlapi.com/api-references/video-models/kling-ai/v1-pro-image-to-video"},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"kling-video/v1/pro/text-to-video","type":"video","info":{"name":"Kling 1.0 Pro text-to-video","developer":"Kling AI","description":"Kling AI Text-to-Video generates high-quality videos from text prompts using advanced AI techniques for dynamic visual storytelling.","url":"https://aimlapi.com/models/kling-ai-text-to-video-api","docs_url":"https://docs.aimlapi.com/api-references/video-models/kling-ai/v1-pro-text-to-video"},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"kling-video/v1.5/standard/image-to-video","type":"video","info":{"name":"Kling 1.5 Standard image-to-video","developer":"Kling AI","description":"Kling AI 1.5 Image-to-Video generates high-quality videos from images using advanced AI techniques for dynamic visual storytelling.","url":"https://aimlapi.com/models/kling-ai-image-to-video","docs_url":"https://docs.aimlapi.com/api-references/video-models/kling-ai"},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"kling-video/v1.5/pro/image-to-video","type":"video","info":{"name":"Kling 1.5 Pro image-to-video","developer":"Kling AI","description":"Kling AI 1.5 Image-to-Video generates high-quality videos from images using advanced AI techniques for dynamic visual storytelling.","url":"https://aimlapi.com/models/kling-ai-image-to-video","docs_url":"https://docs.aimlapi.com/api-references/video-models/kling-ai"},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"kling-video/v1.5/pro/text-to-video","type":"video","info":{"name":"Kling 1.5 Pro text-to-video","developer":"Kling AI","description":"Kling AI 1.5 Text-to-Video generates high-quality videos from text prompts using advanced AI techniques for dynamic visual storytelling.","url":"https://aimlapi.com/models/kling-ai-text-to-video-api","docs_url":"https://docs.aimlapi.com/api-references/video-models/kling-ai"},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"kling-video/v1.6/pro/image-to-video","type":"video","info":{"name":"Kling 1.6 Pro image-to-video","developer":"Kling AI","description":"Kling 1.6 enhances video generation with improved prompt adherence and dynamic visuals for diverse creative applications.","url":"https://aimlapi.com/models/kling-1-6-pro-api","docs_url":"https://docs.aimlapi.com/api-references/video-models/kling-ai/v1.6-pro-image-to-video"},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"kling-video/v1.6/pro/text-to-video","type":"video","info":{"name":"Kling 1.6 Pro text-to-video","developer":"Kling AI","description":"Kling 1.6 Pro text-to-video generates high-quality videos from text prompts with enhanced motion and visual fidelity.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/kling-ai/v1.6-pro-text-to-video"},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"kling-video/v1.6/standard/multi-image-to-video","type":"video","info":{"name":"Kling 1.6 Standard multi-image-to-video","developer":"Kling AI","description":"Kling 1.6 multi-image-to-video creates dynamic videos from multiple input images with enhanced temporal consistency and natural transitions.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/kling-ai"},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"klingai/kling-video-v1.6-pro-effects","type":"video","info":{"name":"Kling 1.6 Pro Effects","developer":"Kling AI","description":"Kling 1.6 Pro Effects applies cinematic visual effects to videos using advanced AI motion control.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/kling-ai/v1.6-pro-effects"},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"klingai/kling-video-v1.6-standard-effects","type":"video","info":{"name":"Kling 1.6 Standard Effects","developer":"Kling AI","description":"Kling 1.6 Standard Effects applies AI-driven visual effects to videos at a cost-effective quality tier.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/kling-ai/v1.6-standard-effects"},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"klingai/v2-master-text-to-video","type":"video","info":{"name":"Kling 2.0 Master text-to-video","developer":"Kling AI","description":"Kling 2.0 Master text-to-video generates high-fidelity videos from text prompts with state-of-the-art motion realism.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/kling-ai/v2-master-text-to-video"},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"klingai/v2-master-image-to-video","type":"video","info":{"name":"Kling 2.0 Master image-to-video","developer":"Kling AI","description":"Kling 2.0 Master image-to-video generates high-fidelity videos from images with state-of-the-art motion realism.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/kling-ai/v2-master-image-to-video"},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"klingai/v2.1-master-text-to-video","type":"video","info":{"name":"Kling 2.1 Master text-to-video","developer":"Kling AI","description":"Kling 2.1 Master text-to-video generates premium videos from text with the highest quality in the Kling 2.1 series.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/kling-ai/v2.1-master-text-to-video"},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"klingai/v2.1-master-image-to-video","type":"video","info":{"name":"Kling 2.1 Master image-to-video","developer":"Kling AI","description":"Kling 2.1 Master image-to-video generates high-fidelity videos with the highest motion quality in the Kling 2.1 lineup.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/kling-ai/v2.1-master-image-to-video"},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"kling-video/v2.1/standard/image-to-video","type":"video","info":{"name":"Kling 2.1 Standard image-to-video","developer":"Kling AI","description":"Kling 2.1 enhances video generation with improved prompt adherence for diverse creative applications.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/kling-ai/v2.1-standard-image-to-video"},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"kling-video/v2.1/pro/image-to-video","type":"video","info":{"name":"Kling 2.1 Pro image-to-video","developer":"Kling AI","description":"Kling 2.1 enhances video generation with improved prompt adherence for diverse creative applications.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/kling-ai/v2.1-pro-image-to-video"},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"klingai/video-v2-6-pro-motion-control","type":"video","info":{"name":"Kling 2.6 Pro motion-control","developer":"Kling AI","description":"Kling 2.6 Pro Motion Control enables precise camera and subject motion control for professional video generation.","url":"","docs_url":""},"features":[],"endpoints":["/v2/generate/video/kling/generation","/v2/video/generations"]},{"id":"luma/ray-2","type":"video","info":{"name":"Luma Dream Machine Ray-2","developer":"Luma AI","description":"Luma Ray-2 generates high-quality videos from text prompts or images with advanced AI techniques.","url":"","docs_url":"https://docs.aimlapi.com/api-references/video-models/luma-ai/ray-2"},"features":[],"endpoints":["/v2/video/generations"]},{"id":"luma/ray-flash-2","type":"video","info":{"name":"Luma Dream Machine Ray-Flash-2","developer":"Luma AI","description":"Luma Ray-Flash-2 is a faster, more cost-effective model for high-resolution video generation.","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"fallback-openai/sora-2-t2v","type":"video","info":{"name":"Sora 2 Text to Video","developer":"Open AI","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"fallback-openai/sora-2-i2v","type":"video","info":{"name":"Sora 2 Image to Video","developer":"Open AI","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"fallback-openai/sora-2-pro-i2v","type":"video","info":{"name":"Sora 2 Pro Image to Video","developer":"Open AI","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"fallback-openai/sora-2-pro-t2v","type":"video","info":{"name":"Sora 2 Pro Text to Video","developer":"Open AI","description":"","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"magic/image-to-video","type":"video","info":{"developer":"Magic Inc","description":"","name":"Magic Image to Video","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"magic/text-to-video","type":"video","info":{"developer":"Magic Inc","description":"","name":"Magic Text to Video","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"magic/video-to-video","type":"video","info":{"developer":"Magic Inc","description":"","name":"Magic Image to Video","url":"","docs_url":""},"features":[],"endpoints":["/v2/video/generations"]},{"id":"stable-audio","type":"audio","info":{"name":"Stable audio","developer":"Stability AI","description":"Stable Audio generates high-quality audio from text prompts with innovative features like audio transformation and extensive creative control.","url":"https://aimlapi.com/models/stable-audio","docs_url":"https://docs.aimlapi.com/api-references/music-models/stability-ai/stable-audio"},"features":[],"endpoints":["/v2/generate/audio"]},{"id":"minimax-music","type":"audio","info":{"name":"Minimax music","developer":"Minimax AI","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/music-models/minimax/minimax-music-legacy"},"features":[],"endpoints":["/v2/generate/audio"]},{"id":"google/lyria2","type":"audio","info":{"name":"Lyria 2","developer":"Google","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/music-models/google/lyria-2"},"features":[],"endpoints":["/v2/generate/audio"]},{"id":"music-01","type":"audio","info":{"description":"MiniMax Music generates high-quality music from text prompts using advanced AI techniques for diverse musical compositions.","name":"Music 01","developer":"Minimax AI","url":"https://aimlapi.com/models/minimax-music-api","docs_url":"https://docs.aimlapi.com/api-references/music-models/minimax/music-01"},"features":[],"endpoints":["v2/generate/audio/minimax/upload","v2/generate/audio/minimax/generate"]},{"id":"minimax/music-1.5","type":"audio","info":{"description":"MiniMax Music 1.5 provides enhanced music generation quality with refined control over composition styles.","name":"MiniMax Music 1.5","developer":"Minimax AI","url":"","docs_url":""},"features":[],"endpoints":["v2/generate/audio/minimax/generate"]},{"id":"minimax/music-2.0","type":"audio","info":{"description":"MiniMax Music 2.0 delivers superior music generation quality and offers improved control over composition styles.","name":"MiniMax Music 2.0","developer":"Minimax AI","url":"","docs_url":""},"features":[],"endpoints":["v2/generate/audio/minimax/generate"]},{"id":"elevenlabs/eleven_music","type":"audio","info":{"name":"Eleven Music v1","developer":"Elevenlabs","description":"","url":"","docs_url":"https://docs.aimlapi.com/api-references/music-models"},"features":[],"endpoints":["/v2/generate/audio"]}]}