diff --git a/apps/sim/app/(landing)/models/[provider]/[model]/page.tsx b/apps/sim/app/(landing)/models/[provider]/[model]/page.tsx
index fd7557e37c7..c539d739daf 100644
--- a/apps/sim/app/(landing)/models/[provider]/[model]/page.tsx
+++ b/apps/sim/app/(landing)/models/[provider]/[model]/page.tsx
@@ -18,6 +18,7 @@ import {
formatPrice,
formatTokenCount,
formatUpdatedAt,
+ getEffectiveMaxOutputTokens,
getModelBySlug,
getPricingBounds,
getProviderBySlug,
@@ -198,7 +199,8 @@ export default async function ModelPage({
- {model.summary} {model.bestFor}
+ {model.summary}
+ {model.bestFor ? ` ${model.bestFor}` : ''}
@@ -229,13 +231,11 @@ export default async function ModelPage({
? `${formatPrice(model.pricing.cachedInput)}/1M`
: 'N/A'
}
- compact
/>
@@ -280,12 +280,12 @@ export default async function ModelPage({
label='Max output'
value={
model.capabilities.maxOutputTokens
- ? `${formatTokenCount(model.capabilities.maxOutputTokens)} tokens`
- : 'Standard defaults'
+                        ? `${formatTokenCount(getEffectiveMaxOutputTokens(model.capabilities) ?? model.capabilities.maxOutputTokens)} tokens`
+ : 'Not published'
}
/>
-
+ {model.bestFor ? : null}
diff --git a/apps/sim/app/(landing)/models/utils.test.ts b/apps/sim/app/(landing)/models/utils.test.ts
new file mode 100644
index 00000000000..894c74500c9
--- /dev/null
+++ b/apps/sim/app/(landing)/models/utils.test.ts
@@ -0,0 +1,49 @@
+import { describe, expect, it } from 'vitest'
+import { buildModelCapabilityFacts, getEffectiveMaxOutputTokens, getModelBySlug } from './utils'
+
+describe('model catalog capability facts', () => {
+ it.concurrent(
+ 'shows structured outputs support and published max output tokens for gpt-4o',
+ () => {
+ const model = getModelBySlug('openai', 'gpt-4o')
+
+ expect(model).not.toBeNull()
+ expect(model).toBeDefined()
+
+ const capabilityFacts = buildModelCapabilityFacts(model!)
+ const structuredOutputs = capabilityFacts.find((fact) => fact.label === 'Structured outputs')
+ const maxOutputTokens = capabilityFacts.find((fact) => fact.label === 'Max output tokens')
+
+ expect(getEffectiveMaxOutputTokens(model!.capabilities)).toBe(16384)
+ expect(structuredOutputs?.value).toBe('Supported')
+ expect(maxOutputTokens?.value).toBe('16k')
+ }
+ )
+
+ it.concurrent('preserves native structured outputs labeling for claude models', () => {
+ const model = getModelBySlug('anthropic', 'claude-sonnet-4-6')
+
+ expect(model).not.toBeNull()
+ expect(model).toBeDefined()
+
+ const capabilityFacts = buildModelCapabilityFacts(model!)
+ const structuredOutputs = capabilityFacts.find((fact) => fact.label === 'Structured outputs')
+
+ expect(structuredOutputs?.value).toBe('Supported (native)')
+ })
+
+ it.concurrent('does not invent a max output token limit when one is not published', () => {
+ expect(getEffectiveMaxOutputTokens({})).toBeNull()
+ })
+
+ it.concurrent('keeps best-for copy for clearly differentiated models only', () => {
+ const researchModel = getModelBySlug('google', 'deep-research-pro-preview-12-2025')
+ const generalModel = getModelBySlug('xai', 'grok-4-latest')
+
+ expect(researchModel).not.toBeNull()
+ expect(generalModel).not.toBeNull()
+
+ expect(researchModel?.bestFor).toContain('research workflows')
+ expect(generalModel?.bestFor).toBeUndefined()
+ })
+})
diff --git a/apps/sim/app/(landing)/models/utils.ts b/apps/sim/app/(landing)/models/utils.ts
index cdf79f87b7c..8e649c95c6b 100644
--- a/apps/sim/app/(landing)/models/utils.ts
+++ b/apps/sim/app/(landing)/models/utils.ts
@@ -112,7 +112,7 @@ export interface CatalogModel {
capabilities: ModelCapabilities
capabilityTags: string[]
summary: string
- bestFor: string
+ bestFor?: string
searchText: string
}
@@ -190,6 +190,14 @@ export function formatCapabilityBoolean(
return value ? positive : negative
}
+function supportsCatalogStructuredOutputs(capabilities: ModelCapabilities): boolean {
+ return !capabilities.deepResearch
+}
+
+export function getEffectiveMaxOutputTokens(capabilities: ModelCapabilities): number | null {
+ return capabilities.maxOutputTokens ?? null
+}
+
function trimTrailingZeros(value: string): string {
return value.replace(/\.0+$/, '').replace(/(\.\d*?)0+$/, '$1')
}
@@ -326,7 +334,7 @@ function buildCapabilityTags(capabilities: ModelCapabilities): string[] {
tags.push('Tool choice')
}
- if (capabilities.nativeStructuredOutputs) {
+ if (supportsCatalogStructuredOutputs(capabilities)) {
tags.push('Structured outputs')
}
@@ -365,7 +373,7 @@ function buildBestForLine(model: {
pricing: PricingInfo
capabilities: ModelCapabilities
contextWindow: number | null
-}): string {
+}): string | null {
const { pricing, capabilities, contextWindow } = model
if (capabilities.deepResearch) {
@@ -376,10 +384,6 @@ function buildBestForLine(model: {
return 'Best for reasoning-heavy tasks that need more deliberate model control.'
}
- if (pricing.input <= 0.2 && pricing.output <= 1.25) {
- return 'Best for cost-sensitive automations, background tasks, and high-volume workloads.'
- }
-
if (contextWindow && contextWindow >= 1000000) {
return 'Best for long-context retrieval, large documents, and high-memory workflows.'
}
@@ -388,7 +392,11 @@ function buildBestForLine(model: {
return 'Best for production workflows that need reliable typed outputs.'
}
- return 'Best for general-purpose AI workflows inside Sim.'
+ if (pricing.input <= 0.2 && pricing.output <= 1.25) {
+ return 'Best for cost-sensitive automations, background tasks, and high-volume workloads.'
+ }
+
+ return null
}
function buildModelSummary(
@@ -437,6 +445,11 @@ const rawProviders = Object.values(PROVIDER_DEFINITIONS).map((provider) => {
const shortId = stripProviderPrefix(provider.id, model.id)
const mergedCapabilities = { ...provider.capabilities, ...model.capabilities }
const capabilityTags = buildCapabilityTags(mergedCapabilities)
+ const bestFor = buildBestForLine({
+ pricing: model.pricing,
+ capabilities: mergedCapabilities,
+ contextWindow: model.contextWindow ?? null,
+ })
const displayName = formatModelDisplayName(provider.id, model.id)
const modelSlug = slugify(shortId)
const href = `/models/${providerSlug}/${modelSlug}`
@@ -461,11 +474,7 @@ const rawProviders = Object.values(PROVIDER_DEFINITIONS).map((provider) => {
model.contextWindow ?? null,
capabilityTags
),
- bestFor: buildBestForLine({
- pricing: model.pricing,
- capabilities: mergedCapabilities,
- contextWindow: model.contextWindow ?? null,
- }),
+ ...(bestFor ? { bestFor } : {}),
searchText: [
provider.name,
providerDisplayName,
@@ -683,6 +692,7 @@ export function buildModelFaqs(provider: CatalogProvider, model: CatalogModel):
export function buildModelCapabilityFacts(model: CatalogModel): CapabilityFact[] {
const { capabilities } = model
+ const supportsStructuredOutputs = supportsCatalogStructuredOutputs(capabilities)
return [
{
@@ -711,7 +721,11 @@ export function buildModelCapabilityFacts(model: CatalogModel): CapabilityFact[]
},
{
label: 'Structured outputs',
- value: formatCapabilityBoolean(capabilities.nativeStructuredOutputs),
+ value: supportsStructuredOutputs
+ ? capabilities.nativeStructuredOutputs
+ ? 'Supported (native)'
+ : 'Supported'
+ : 'Not supported',
},
{
label: 'Tool choice',
@@ -732,8 +746,8 @@ export function buildModelCapabilityFacts(model: CatalogModel): CapabilityFact[]
{
label: 'Max output tokens',
value: capabilities.maxOutputTokens
- ? formatTokenCount(capabilities.maxOutputTokens)
- : 'Standard defaults',
+        ? formatTokenCount(getEffectiveMaxOutputTokens(capabilities) ?? capabilities.maxOutputTokens)
+ : 'Not published',
},
]
}
@@ -752,8 +766,8 @@ export function getProviderCapabilitySummary(provider: CatalogProvider): Capabil
const reasoningCount = provider.models.filter(
(model) => model.capabilities.reasoningEffort || model.capabilities.thinking
).length
- const structuredCount = provider.models.filter(
- (model) => model.capabilities.nativeStructuredOutputs
+ const structuredCount = provider.models.filter((model) =>
+ supportsCatalogStructuredOutputs(model.capabilities)
).length
const deepResearchCount = provider.models.filter(
(model) => model.capabilities.deepResearch
diff --git a/apps/sim/app/llms.txt/route.ts b/apps/sim/app/llms.txt/route.ts
index 79c79d086ec..89fbc5a67f4 100644
--- a/apps/sim/app/llms.txt/route.ts
+++ b/apps/sim/app/llms.txt/route.ts
@@ -1,42 +1,44 @@
import { getBaseUrl } from '@/lib/core/utils/urls'
-export async function GET() {
+export function GET() {
const baseUrl = getBaseUrl()
- const llmsContent = `# Sim
+ const content = `# Sim
-> Sim is the open-source platform to build AI agents and run your agentic workforce. Connect 1,000+ integrations and LLMs to deploy and orchestrate agentic workflows.
+> Sim is the open-source platform to build AI agents and run your agentic workforce. Connect integrations and LLMs to deploy and orchestrate agentic workflows.
-Sim lets teams create agents, workflows, knowledge bases, tables, and docs. Over 100,000 builders use Sim — from startups to Fortune 500 companies. SOC2 compliant.
+Sim lets teams create agents, workflows, knowledge bases, tables, and docs. It supports both product discovery pages and deeper technical documentation.
-## Core Pages
+## Preferred URLs
-- [Homepage](${baseUrl}): Product overview, features, and pricing
+- [Homepage](${baseUrl}): Product overview and primary entry point
+- [Integrations directory](${baseUrl}/integrations): Public catalog of integrations and automation capabilities
+- [Models directory](${baseUrl}/models): Public catalog of AI models, pricing, context windows, and capabilities
+- [Blog](${baseUrl}/blog): Announcements, guides, and product context
- [Changelog](${baseUrl}/changelog): Product updates and release notes
-- [Sim Blog](${baseUrl}/blog): Announcements, insights, and guides
## Documentation
-- [Documentation](https://docs.sim.ai): Complete guides and API reference
-- [Quickstart](https://docs.sim.ai/quickstart): Get started in 5 minutes
-- [API Reference](https://docs.sim.ai/api): REST API documentation
+- [Documentation](https://docs.sim.ai): Product guides and technical reference
+- [Quickstart](https://docs.sim.ai/quickstart): Fastest path to getting started
+- [API Reference](https://docs.sim.ai/api): API documentation
## Key Concepts
- **Workspace**: Container for workflows, data sources, and executions
- **Workflow**: Directed graph of blocks defining an agentic process
-- **Block**: Individual step (LLM call, tool call, HTTP request, code execution)
+- **Block**: Individual step such as an LLM call, tool call, HTTP request, or code execution
- **Trigger**: Event or schedule that initiates workflow execution
- **Execution**: A single run of a workflow with logs and outputs
-- **Knowledge Base**: Vector-indexed document store for retrieval-augmented generation
+- **Knowledge Base**: Document store used for retrieval-augmented generation
## Capabilities
- AI agent creation and deployment
- Agentic workflow orchestration
-- 1,000+ integrations (Slack, Gmail, Notion, Airtable, databases, and more)
-- Multi-model LLM orchestration (OpenAI, Anthropic, Google, Mistral, xAI, Perplexity)
-- Knowledge base creation with retrieval-augmented generation (RAG)
+- Integrations across business tools, databases, and communication platforms
+- Multi-model LLM orchestration
+- Knowledge bases and retrieval-augmented generation
- Table creation and management
- Document creation and processing
- Scheduled and webhook-triggered executions
@@ -45,24 +47,19 @@ Sim lets teams create agents, workflows, knowledge bases, tables, and docs. Over
- AI agent deployment and orchestration
- Knowledge bases and RAG pipelines
-- Document creation and processing
- Customer support automation
-- Internal operations (sales, marketing, legal, finance)
+- Internal operations workflows across sales, marketing, legal, and finance
-## Links
+## Additional Links
- [GitHub Repository](https://github.com/simstudioai/sim): Open-source codebase
-- [Discord Community](https://discord.gg/Hr4UWYEcTT): Get help and connect with 100,000+ builders
-- [X/Twitter](https://x.com/simdotai): Product updates and announcements
-
-## Optional
-
-- [Careers](https://jobs.ashbyhq.com/sim): Join the Sim team
+- [Docs](https://docs.sim.ai): Canonical documentation source
- [Terms of Service](${baseUrl}/terms): Legal terms
- [Privacy Policy](${baseUrl}/privacy): Data handling practices
+- [Sitemap](${baseUrl}/sitemap.xml): Public URL inventory
`
- return new Response(llmsContent, {
+ return new Response(content, {
headers: {
'Content-Type': 'text/markdown; charset=utf-8',
'Cache-Control': 'public, max-age=86400, s-maxage=86400',
diff --git a/apps/sim/app/sitemap.ts b/apps/sim/app/sitemap.ts
index 6c95b859370..a558525950e 100644
--- a/apps/sim/app/sitemap.ts
+++ b/apps/sim/app/sitemap.ts
@@ -8,6 +8,34 @@ export default async function sitemap(): Promise<MetadataRoute.Sitemap> {
const baseUrl = getBaseUrl()
const now = new Date()
+ const integrationPages: MetadataRoute.Sitemap = integrations.map((integration) => ({
+ url: `${baseUrl}/integrations/${integration.slug}`,
+ lastModified: now,
+ }))
+ const modelHubPages: MetadataRoute.Sitemap = [
+ {
+ url: `${baseUrl}/integrations`,
+ lastModified: now,
+ },
+ {
+ url: `${baseUrl}/models`,
+ lastModified: now,
+ },
+ {
+ url: `${baseUrl}/partners`,
+ lastModified: now,
+ },
+ ]
+ const providerPages: MetadataRoute.Sitemap = MODEL_PROVIDERS_WITH_CATALOGS.map((provider) => ({
+ url: `${baseUrl}${provider.href}`,
+ lastModified: new Date(
+ Math.max(...provider.models.map((model) => new Date(model.pricing.updatedAt).getTime()))
+ ),
+ }))
+ const modelPages: MetadataRoute.Sitemap = ALL_CATALOG_MODELS.map((model) => ({
+ url: `${baseUrl}${model.href}`,
+ lastModified: new Date(model.pricing.updatedAt),
+ }))
const staticPages: MetadataRoute.Sitemap = [
{
@@ -26,14 +54,6 @@ export default async function sitemap(): Promise<MetadataRoute.Sitemap> {
// url: `${baseUrl}/templates`,
// lastModified: now,
// },
- {
- url: `${baseUrl}/integrations`,
- lastModified: now,
- },
- {
- url: `${baseUrl}/models`,
- lastModified: now,
- },
{
url: `${baseUrl}/changelog`,
lastModified: now,
@@ -54,20 +74,12 @@ export default async function sitemap(): Promise {
lastModified: new Date(p.updated ?? p.date),
}))
- const integrationPages: MetadataRoute.Sitemap = integrations.map((i) => ({
- url: `${baseUrl}/integrations/${i.slug}`,
- lastModified: now,
- }))
-
- const providerPages: MetadataRoute.Sitemap = MODEL_PROVIDERS_WITH_CATALOGS.map((provider) => ({
- url: `${baseUrl}${provider.href}`,
- lastModified: now,
- }))
-
- const modelPages: MetadataRoute.Sitemap = ALL_CATALOG_MODELS.map((model) => ({
- url: `${baseUrl}${model.href}`,
- lastModified: new Date(model.pricing.updatedAt),
- }))
-
- return [...staticPages, ...blogPages, ...integrationPages, ...providerPages, ...modelPages]
+ return [
+ ...staticPages,
+ ...modelHubPages,
+ ...integrationPages,
+ ...providerPages,
+ ...modelPages,
+ ...blogPages,
+ ]
}
diff --git a/apps/sim/providers/models.ts b/apps/sim/providers/models.ts
index 3465f223230..5d293dd6047 100644
--- a/apps/sim/providers/models.ts
+++ b/apps/sim/providers/models.ts
@@ -271,6 +271,7 @@ export const PROVIDER_DEFINITIONS: Record = {
verbosity: {
values: ['low', 'medium', 'high'],
},
+ maxOutputTokens: 128000,
},
contextWindow: 400000,
},
@@ -290,6 +291,7 @@ export const PROVIDER_DEFINITIONS: Record = {
verbosity: {
values: ['low', 'medium', 'high'],
},
+ maxOutputTokens: 128000,
},
contextWindow: 400000,
},
@@ -324,6 +326,7 @@ export const PROVIDER_DEFINITIONS: Record = {
verbosity: {
values: ['low', 'medium', 'high'],
},
+ maxOutputTokens: 128000,
},
contextWindow: 400000,
},
@@ -342,6 +345,7 @@ export const PROVIDER_DEFINITIONS: Record = {
verbosity: {
values: ['low', 'medium', 'high'],
},
+ maxOutputTokens: 128000,
},
contextWindow: 400000,
},
@@ -360,6 +364,7 @@ export const PROVIDER_DEFINITIONS: Record = {
verbosity: {
values: ['low', 'medium', 'high'],
},
+ maxOutputTokens: 128000,
},
contextWindow: 400000,
},
@@ -373,6 +378,7 @@ export const PROVIDER_DEFINITIONS: Record = {
},
capabilities: {
temperature: { min: 0, max: 2 },
+ maxOutputTokens: 16384,
},
contextWindow: 128000,
},
@@ -449,6 +455,7 @@ export const PROVIDER_DEFINITIONS: Record = {
reasoningEffort: {
values: ['low', 'medium', 'high'],
},
+ maxOutputTokens: 100000,
},
contextWindow: 200000,
},
@@ -463,6 +470,7 @@ export const PROVIDER_DEFINITIONS: Record = {
},
capabilities: {
temperature: { min: 0, max: 2 },
+ maxOutputTokens: 16384,
},
contextWindow: 128000,
},
@@ -509,7 +517,7 @@ export const PROVIDER_DEFINITIONS: Record = {
capabilities: {
temperature: { min: 0, max: 1 },
nativeStructuredOutputs: true,
- maxOutputTokens: 128000,
+ maxOutputTokens: 64000,
thinking: {
levels: ['low', 'medium', 'high', 'max'],
default: 'high',
@@ -741,6 +749,7 @@ export const PROVIDER_DEFINITIONS: Record = {
verbosity: {
values: ['low', 'medium', 'high'],
},
+ maxOutputTokens: 128000,
},
contextWindow: 400000,
},
@@ -759,6 +768,7 @@ export const PROVIDER_DEFINITIONS: Record = {
verbosity: {
values: ['low', 'medium', 'high'],
},
+ maxOutputTokens: 128000,
},
contextWindow: 400000,
},
@@ -777,6 +787,7 @@ export const PROVIDER_DEFINITIONS: Record = {
verbosity: {
values: ['low', 'medium', 'high'],
},
+ maxOutputTokens: 128000,
},
contextWindow: 400000,
},
@@ -795,6 +806,7 @@ export const PROVIDER_DEFINITIONS: Record = {
verbosity: {
values: ['low', 'medium', 'high'],
},
+ maxOutputTokens: 128000,
},
contextWindow: 400000,
},
@@ -813,6 +825,7 @@ export const PROVIDER_DEFINITIONS: Record = {
verbosity: {
values: ['low', 'medium', 'high'],
},
+ maxOutputTokens: 128000,
},
contextWindow: 400000,
},
@@ -831,6 +844,7 @@ export const PROVIDER_DEFINITIONS: Record = {
verbosity: {
values: ['low', 'medium', 'high'],
},
+ maxOutputTokens: 128000,
},
contextWindow: 400000,
},
@@ -1067,6 +1081,7 @@ export const PROVIDER_DEFINITIONS: Record = {
levels: ['minimal', 'low', 'medium', 'high'],
default: 'high',
},
+ maxOutputTokens: 65536,
},
contextWindow: 1048576,
},
@@ -1084,6 +1099,7 @@ export const PROVIDER_DEFINITIONS: Record = {
levels: ['minimal', 'low', 'medium', 'high'],
default: 'minimal',
},
+ maxOutputTokens: 65536,
},
contextWindow: 1048576,
},
@@ -1101,6 +1117,7 @@ export const PROVIDER_DEFINITIONS: Record = {
levels: ['minimal', 'low', 'medium', 'high'],
default: 'high',
},
+ maxOutputTokens: 65536,
},
contextWindow: 1000000,
},
@@ -1114,6 +1131,7 @@ export const PROVIDER_DEFINITIONS: Record = {
},
capabilities: {
temperature: { min: 0, max: 2 },
+ maxOutputTokens: 65536,
},
contextWindow: 1048576,
},
@@ -1127,6 +1145,7 @@ export const PROVIDER_DEFINITIONS: Record = {
},
capabilities: {
temperature: { min: 0, max: 2 },
+ maxOutputTokens: 65536,
},
contextWindow: 1048576,
},
@@ -1140,6 +1159,7 @@ export const PROVIDER_DEFINITIONS: Record = {
},
capabilities: {
temperature: { min: 0, max: 2 },
+ maxOutputTokens: 65536,
},
contextWindow: 1048576,
},
@@ -1153,6 +1173,7 @@ export const PROVIDER_DEFINITIONS: Record = {
},
capabilities: {
temperature: { min: 0, max: 2 },
+ maxOutputTokens: 8192,
},
contextWindow: 1048576,
},
@@ -1165,6 +1186,7 @@ export const PROVIDER_DEFINITIONS: Record = {
},
capabilities: {
temperature: { min: 0, max: 2 },
+ maxOutputTokens: 8192,
},
contextWindow: 1048576,
},
@@ -1178,6 +1200,7 @@ export const PROVIDER_DEFINITIONS: Record = {
capabilities: {
deepResearch: true,
memory: false,
+ maxOutputTokens: 65536,
},
contextWindow: 1000000,
},
@@ -2094,6 +2117,7 @@ export const PROVIDER_DEFINITIONS: Record = {
capabilities: {
temperature: { min: 0, max: 1 },
nativeStructuredOutputs: true,
+ maxOutputTokens: 64000,
},
contextWindow: 200000,
},
@@ -2107,6 +2131,7 @@ export const PROVIDER_DEFINITIONS: Record = {
capabilities: {
temperature: { min: 0, max: 1 },
nativeStructuredOutputs: true,
+ maxOutputTokens: 64000,
},
contextWindow: 200000,
},
@@ -2120,6 +2145,7 @@ export const PROVIDER_DEFINITIONS: Record = {
capabilities: {
temperature: { min: 0, max: 1 },
nativeStructuredOutputs: true,
+ maxOutputTokens: 64000,
},
contextWindow: 200000,
},
@@ -2133,6 +2159,7 @@ export const PROVIDER_DEFINITIONS: Record = {
capabilities: {
temperature: { min: 0, max: 1 },
nativeStructuredOutputs: true,
+ maxOutputTokens: 64000,
},
contextWindow: 200000,
},
@@ -2337,6 +2364,7 @@ export const PROVIDER_DEFINITIONS: Record = {
},
capabilities: {
temperature: { min: 0, max: 1 },
+ maxOutputTokens: 32768,
},
contextWindow: 128000,
},
@@ -2373,6 +2401,7 @@ export const PROVIDER_DEFINITIONS: Record = {
},
capabilities: {
temperature: { min: 0, max: 1 },
+ maxOutputTokens: 16384,
},
contextWindow: 128000,
},
@@ -2385,6 +2414,7 @@ export const PROVIDER_DEFINITIONS: Record = {
},
capabilities: {
temperature: { min: 0, max: 1 },
+ maxOutputTokens: 40000,
},
contextWindow: 128000,
},
@@ -2397,6 +2427,7 @@ export const PROVIDER_DEFINITIONS: Record = {
},
capabilities: {
temperature: { min: 0, max: 1 },
+ maxOutputTokens: 8192,
},
contextWindow: 128000,
},
@@ -2409,6 +2440,7 @@ export const PROVIDER_DEFINITIONS: Record = {
},
capabilities: {
temperature: { min: 0, max: 1 },
+ maxOutputTokens: 8192,
},
contextWindow: 128000,
},
@@ -2421,6 +2453,7 @@ export const PROVIDER_DEFINITIONS: Record = {
},
capabilities: {
temperature: { min: 0, max: 1 },
+ maxOutputTokens: 8192,
},
contextWindow: 128000,
},
@@ -2863,13 +2896,17 @@ export function getModelsWithoutMemory(): string[] {
export function getMaxOutputTokensForModel(modelId: string): number {
const normalizedModelId = modelId.toLowerCase()
const STANDARD_MAX_OUTPUT_TOKENS = 4096
+ const allModels = Object.values(PROVIDER_DEFINITIONS).flatMap((provider) => provider.models)
- for (const provider of Object.values(PROVIDER_DEFINITIONS)) {
- for (const model of provider.models) {
- const baseModelId = model.id.toLowerCase()
- if (normalizedModelId === baseModelId || normalizedModelId.startsWith(`${baseModelId}-`)) {
- return model.capabilities.maxOutputTokens || STANDARD_MAX_OUTPUT_TOKENS
- }
+ const exactMatch = allModels.find((model) => model.id.toLowerCase() === normalizedModelId)
+ if (exactMatch) {
+ return exactMatch.capabilities.maxOutputTokens || STANDARD_MAX_OUTPUT_TOKENS
+ }
+
+ for (const model of allModels) {
+ const baseModelId = model.id.toLowerCase()
+ if (normalizedModelId.startsWith(`${baseModelId}-`)) {
+ return model.capabilities.maxOutputTokens || STANDARD_MAX_OUTPUT_TOKENS
}
}
diff --git a/apps/sim/providers/utils.test.ts b/apps/sim/providers/utils.test.ts
index 9e1491d9aca..0b46003ca4a 100644
--- a/apps/sim/providers/utils.test.ts
+++ b/apps/sim/providers/utils.test.ts
@@ -664,6 +664,45 @@ describe('Model Capabilities', () => {
describe('Max Output Tokens', () => {
describe('getMaxOutputTokensForModel', () => {
+ it.concurrent('should return published max for OpenAI GPT-4o', () => {
+ expect(getMaxOutputTokensForModel('gpt-4o')).toBe(16384)
+ })
+
+ it.concurrent('should return published max for OpenAI GPT-5.1', () => {
+ expect(getMaxOutputTokensForModel('gpt-5.1')).toBe(128000)
+ })
+
+ it.concurrent('should return published max for OpenAI GPT-5 Chat', () => {
+ expect(getMaxOutputTokensForModel('gpt-5-chat-latest')).toBe(16384)
+ })
+
+ it.concurrent('should return published max for OpenAI o1', () => {
+ expect(getMaxOutputTokensForModel('o1')).toBe(100000)
+ })
+
+ it.concurrent('should return updated max for Claude Sonnet 4.6', () => {
+ expect(getMaxOutputTokensForModel('claude-sonnet-4-6')).toBe(64000)
+ })
+
+ it.concurrent('should return published max for Gemini 2.5 Pro', () => {
+ expect(getMaxOutputTokensForModel('gemini-2.5-pro')).toBe(65536)
+ })
+
+ it.concurrent('should return published max for Azure GPT-5.2', () => {
+ expect(getMaxOutputTokensForModel('azure/gpt-5.2')).toBe(128000)
+ })
+
+ it.concurrent('should return standard default for models without maxOutputTokens', () => {
+ expect(getMaxOutputTokensForModel('deepseek-reasoner')).toBe(4096)
+ expect(getMaxOutputTokensForModel('grok-4-latest')).toBe(4096)
+ })
+
+ it.concurrent('should return published max for Bedrock Claude Opus 4.1', () => {
+ expect(getMaxOutputTokensForModel('bedrock/anthropic.claude-opus-4-1-20250805-v1:0')).toBe(
+ 64000
+ )
+ })
+
it.concurrent('should return correct max for Claude Opus 4.6', () => {
expect(getMaxOutputTokensForModel('claude-opus-4-6')).toBe(128000)
})
@@ -676,10 +715,6 @@ describe('Max Output Tokens', () => {
expect(getMaxOutputTokensForModel('claude-opus-4-1')).toBe(32000)
})
- it.concurrent('should return standard default for models without maxOutputTokens', () => {
- expect(getMaxOutputTokensForModel('gpt-4o')).toBe(4096)
- })
-
it.concurrent('should return standard default for unknown models', () => {
expect(getMaxOutputTokensForModel('unknown-model')).toBe(4096)
})