Mantler

What is a mantle?

Browse mantle configurations.

Mantles are hardware-aware AI stack objects. They combine hardware, model, runtime, orchestrator, and provider into a reusable configuration ranked by real-world outcomes.

prod-gcp-ops-001

deepseek-r2 / vllm · 0.976

mantle: prod-gcp-ops-001
  hardware: l40s
  model: deepseek-r2
  runtime: vllm
  orchestrator: builtin
  provider: gcp
  score: 0.976
  verified: 4200 runs

prod-azure-data-pipeline-002

qwen3 / tensorrt · 0.973

mantle: prod-azure-data-pipeline-002
  hardware: rtx-6000-ada
  model: qwen3
  runtime: tensorrt
  orchestrator: autogen
  provider: azure
  score: 0.973
  verified: 4196 runs

prod-local-agents-003

command-a / ollama · 0.970

mantle: prod-local-agents-003
  hardware: h100-pcie
  model: command-a
  runtime: ollama
  orchestrator: langgraph
  provider: local
  score: 0.970
  verified: 4192 runs

prod-local-agents-011

deepseek-r2 / tensorrt · 0.953

mantle: prod-local-agents-011
  hardware: h100-pcie
  model: deepseek-r2
  runtime: tensorrt
  orchestrator: autogen
  provider: local
  score: 0.953
  verified: 4050 runs

prod-gcp-ops-013

command-a / vllm · 0.953

mantle: prod-gcp-ops-013
  hardware: l40s
  model: command-a
  runtime: vllm
  orchestrator: builtin
  provider: gcp
  score: 0.953
  verified: 3932 runs

prod-aws-retrieval-012

qwen3 / ollama · 0.950

mantle: prod-aws-retrieval-012
  hardware: a100-sxm
  model: qwen3
  runtime: ollama
  orchestrator: langgraph
  provider: aws
  score: 0.950
  verified: 4046 runs

team-local-research-004

qwen3 / ollama · 0.943

mantle: team-local-research-004
  hardware: dgx-spark
  model: qwen3
  runtime: ollama
  orchestrator: builtin
  provider: local
  score: 0.943
  verified: 2088 runs

team-aws-coding-007

llama4-scout / ollama · 0.941

mantle: team-aws-coding-007
  hardware: dgx-spark
  model: llama4-scout
  runtime: ollama
  orchestrator: builtin
  provider: aws
  score: 0.941
  verified: 1966 runs

team-aws-qa-005

gemma4 / vllm · 0.940

mantle: team-aws-qa-005
  hardware: dgx-spark
  model: gemma4
  runtime: vllm
  orchestrator: langgraph
  provider: aws
  score: 0.940
  verified: 2084 runs

team-local-research-008

mistral-medium / vllm · 0.938

mantle: team-local-research-008
  hardware: dgx-spark
  model: mistral-medium
  runtime: vllm
  orchestrator: langgraph
  provider: local
  score: 0.938
  verified: 1962 runs

team-local-agents-006

phi-4 / lmstudio · 0.937

mantle: team-local-agents-006
  hardware: dgx-spark
  model: phi-4
  runtime: lmstudio
  orchestrator: crewai
  provider: local
  score: 0.937
  verified: 2080 runs

prod-gcp-ops-021

deepseek-r2 / ollama · 0.936

mantle: prod-gcp-ops-021
  hardware: l40s
  model: deepseek-r2
  runtime: ollama
  orchestrator: langgraph
  provider: gcp
  score: 0.936
  verified: 3790 runs

prod-azure-data-pipeline-022

qwen3 / vllm · 0.933

mantle: prod-azure-data-pipeline-022
  hardware: rtx-6000-ada
  model: qwen3
  runtime: vllm
  orchestrator: builtin
  provider: azure
  score: 0.933
  verified: 3786 runs

prod-local-agents-023

command-a / tensorrt · 0.930

mantle: prod-local-agents-023
  hardware: h100-pcie
  model: command-a
  runtime: tensorrt
  orchestrator: autogen
  provider: local
  score: 0.930
  verified: 3782 runs

team-local-agents-014

qwen3 / vllm · 0.926

mantle: team-local-agents-014
  hardware: dgx-spark
  model: qwen3
  runtime: vllm
  orchestrator: langgraph
  provider: local
  score: 0.926
  verified: 1828 runs

team-aws-coding-015

gemma4 / lmstudio · 0.923

mantle: team-aws-coding-015
  hardware: dgx-spark
  model: gemma4
  runtime: lmstudio
  orchestrator: crewai
  provider: aws
  score: 0.923
  verified: 1824 runs

team-local-research-016

phi-4 / ollama · 0.920

mantle: team-local-research-016
  hardware: dgx-spark
  model: phi-4
  runtime: ollama
  orchestrator: builtin
  provider: local
  score: 0.920
  verified: 1820 runs

prod-local-agents-031

deepseek-r2 / vllm · 0.919

mantle: prod-local-agents-031
  hardware: h100-pcie
  model: deepseek-r2
  runtime: vllm
  orchestrator: builtin
  provider: local
  score: 0.919
  verified: 3530 runs

team-aws-qa-017

llama4-scout / vllm · 0.917

mantle: team-aws-qa-017
  hardware: dgx-spark
  model: llama4-scout
  runtime: vllm
  orchestrator: langgraph
  provider: aws
  score: 0.917
  verified: 1816 runs

prod-aws-retrieval-032

qwen3 / tensorrt · 0.916

mantle: prod-aws-retrieval-032
  hardware: a100-sxm
  model: qwen3
  runtime: tensorrt
  orchestrator: autogen
  provider: aws
  score: 0.916
  verified: 3526 runs

team-local-agents-018

mistral-medium / lmstudio · 0.914

mantle: team-local-agents-018
  hardware: dgx-spark
  model: mistral-medium
  runtime: lmstudio
  orchestrator: crewai
  provider: local
  score: 0.914
  verified: 1812 runs

prod-gcp-ops-033

command-a / ollama · 0.913

mantle: prod-gcp-ops-033
  hardware: l40s
  model: command-a
  runtime: ollama
  orchestrator: langgraph
  provider: gcp
  score: 0.913
  verified: 3522 runs

edge-lambda-retrieval-009

llama4-scout / vllm · 0.911

mantle: edge-lambda-retrieval-009
  hardware: t4
  model: llama4-scout
  runtime: vllm
  orchestrator: builtin
  provider: lambda
  score: 0.911
  verified: 1108 runs

edge-aws-ops-010

qwen3 / ollama · 0.908

mantle: edge-aws-ops-010
  hardware: epyc-9654
  model: qwen3
  runtime: ollama
  orchestrator: autogen
  provider: aws
  score: 0.908
  verified: 1104 runs

team-aws-qa-025

gemma4 / ollama · 0.906

mantle: team-aws-qa-025
  hardware: dgx-spark
  model: gemma4
  runtime: ollama
  orchestrator: builtin
  provider: aws
  score: 0.906
  verified: 1564 runs

team-local-research-024

qwen3 / lmstudio · 0.903

mantle: team-local-research-024
  hardware: dgx-spark
  model: qwen3
  runtime: lmstudio
  orchestrator: crewai
  provider: local
  score: 0.903
  verified: 1678 runs

team-local-agents-026

phi-4 / vllm · 0.903

mantle: team-local-agents-026
  hardware: dgx-spark
  model: phi-4
  runtime: vllm
  orchestrator: langgraph
  provider: local
  score: 0.903
  verified: 1560 runs

team-aws-coding-027

llama4-scout / lmstudio · 0.900

mantle: team-aws-coding-027
  hardware: dgx-spark
  model: llama4-scout
  runtime: lmstudio
  orchestrator: crewai
  provider: aws
  score: 0.900
  verified: 1556 runs

team-local-research-028

mistral-medium / ollama · 0.897

mantle: team-local-research-028
  hardware: dgx-spark
  model: mistral-medium
  runtime: ollama
  orchestrator: builtin
  provider: local
  score: 0.897
  verified: 1552 runs

prod-gcp-ops-041

deepseek-r2 / tensorrt · 0.896

mantle: prod-gcp-ops-041
  hardware: l40s
  model: deepseek-r2
  runtime: tensorrt
  orchestrator: autogen
  provider: gcp
  score: 0.896
  verified: 3380 runs

prod-local-agents-043

command-a / vllm · 0.896

mantle: prod-local-agents-043
  hardware: h100-pcie
  model: command-a
  runtime: vllm
  orchestrator: builtin
  provider: local
  score: 0.896
  verified: 3262 runs

edge-aws-support-019

gemma4 / ollama · 0.894

mantle: edge-aws-support-019
  hardware: rtx-5080
  model: gemma4
  runtime: ollama
  orchestrator: autogen
  provider: aws
  score: 0.894
  verified: 848 runs

prod-azure-data-pipeline-042

qwen3 / ollama · 0.893

mantle: prod-azure-data-pipeline-042
  hardware: rtx-6000-ada
  model: qwen3
  runtime: ollama
  orchestrator: langgraph
  provider: azure
  score: 0.893
  verified: 3266 runs

edge-local-qa-020

phi-4 / lmstudio · 0.891

mantle: edge-local-qa-020
  hardware: l4
  model: phi-4
  runtime: lmstudio
  orchestrator: crewai
  provider: local
  score: 0.891
  verified: 844 runs

team-local-agents-034

qwen3 / ollama · 0.886

mantle: team-local-agents-034
  hardware: dgx-spark
  model: qwen3
  runtime: ollama
  orchestrator: builtin
  provider: local
  score: 0.886
  verified: 1418 runs

team-aws-qa-037

llama4-scout / ollama · 0.884

mantle: team-aws-qa-037
  hardware: dgx-spark
  model: llama4-scout
  runtime: ollama
  orchestrator: builtin
  provider: aws
  score: 0.884
  verified: 1296 runs

team-aws-coding-035

gemma4 / vllm · 0.883

mantle: team-aws-coding-035
  hardware: dgx-spark
  model: gemma4
  runtime: vllm
  orchestrator: langgraph
  provider: aws
  score: 0.883
  verified: 1414 runs

team-local-agents-038

mistral-medium / vllm · 0.881

mantle: team-local-agents-038
  hardware: dgx-spark
  model: mistral-medium
  runtime: vllm
  orchestrator: langgraph
  provider: local
  score: 0.881
  verified: 1292 runs

team-local-research-036

phi-4 / lmstudio · 0.880

mantle: team-local-research-036
  hardware: dgx-spark
  model: phi-4
  runtime: lmstudio
  orchestrator: crewai
  provider: local
  score: 0.880
  verified: 1300 runs

prod-local-agents-051

deepseek-r2 / ollama · 0.879

mantle: prod-local-agents-051
  hardware: h100-pcie
  model: deepseek-r2
  runtime: ollama
  orchestrator: langgraph
  provider: local
  score: 0.879
  verified: 3120 runs

prod-aws-retrieval-052

qwen3 / vllm · 0.876

mantle: prod-aws-retrieval-052
  hardware: a100-sxm
  model: qwen3
  runtime: vllm
  orchestrator: builtin
  provider: aws
  score: 0.876
  verified: 3116 runs

prod-gcp-ops-053

command-a / tensorrt · 0.873

mantle: prod-gcp-ops-053
  hardware: l40s
  model: command-a
  runtime: tensorrt
  orchestrator: autogen
  provider: gcp
  score: 0.873
  verified: 3112 runs

edge-local-retrieval-029

llama4-scout / lmstudio · 0.870

mantle: edge-local-retrieval-029
  hardware: rtx-5070-ti
  model: llama4-scout
  runtime: lmstudio
  orchestrator: crewai
  provider: local
  score: 0.870
  verified: 698 runs

team-local-research-044

qwen3 / vllm · 0.869

mantle: team-local-research-044
  hardware: dgx-spark
  model: qwen3
  runtime: vllm
  orchestrator: langgraph
  provider: local
  score: 0.869
  verified: 1158 runs

edge-lambda-ops-030

qwen3 / vllm · 0.867

mantle: edge-lambda-ops-030
  hardware: t4
  model: qwen3
  runtime: vllm
  orchestrator: builtin
  provider: lambda
  score: 0.867
  verified: 584 runs

team-aws-qa-045

gemma4 / lmstudio · 0.866

mantle: team-aws-qa-045
  hardware: dgx-spark
  model: gemma4
  runtime: lmstudio
  orchestrator: crewai
  provider: aws
  score: 0.866
  verified: 1154 runs

team-local-agents-046

phi-4 / ollama · 0.863

mantle: team-local-agents-046
  hardware: dgx-spark
  model: phi-4
  runtime: ollama
  orchestrator: builtin
  provider: local
  score: 0.863
  verified: 1150 runs

prod-gcp-ops-061

deepseek-r2 / vllm · 0.862

mantle: prod-gcp-ops-061
  hardware: l40s
  model: deepseek-r2
  runtime: vllm
  orchestrator: builtin
  provider: gcp
  score: 0.862
  verified: 2860 runs

team-aws-coding-047

llama4-scout / vllm · 0.860

mantle: team-aws-coding-047
  hardware: dgx-spark
  model: llama4-scout
  runtime: vllm
  orchestrator: langgraph
  provider: aws
  score: 0.860
  verified: 1146 runs

prod-azure-data-pipeline-062

qwen3 / tensorrt · 0.859

mantle: prod-azure-data-pipeline-062
  hardware: rtx-6000-ada
  model: qwen3
  runtime: tensorrt
  orchestrator: autogen
  provider: azure
  score: 0.859
  verified: 2856 runs

team-local-research-048

mistral-medium / lmstudio · 0.857

mantle: team-local-research-048
  hardware: dgx-spark
  model: mistral-medium
  runtime: lmstudio
  orchestrator: crewai
  provider: local
  score: 0.857
  verified: 1032 runs

prod-local-agents-063

command-a / ollama · 0.856

mantle: prod-local-agents-063
  hardware: h100-pcie
  model: command-a
  runtime: ollama
  orchestrator: langgraph
  provider: local
  score: 0.856
  verified: 2852 runs

edge-lambda-support-039

gemma4 / vllm · 0.854

mantle: edge-lambda-support-039
  hardware: rtx-4080-super
  model: gemma4
  runtime: vllm
  orchestrator: builtin
  provider: lambda
  score: 0.854
  verified: 438 runs

edge-aws-qa-040

phi-4 / ollama · 0.851

mantle: edge-aws-qa-040
  hardware: rtx-5080
  model: phi-4
  runtime: ollama
  orchestrator: autogen
  provider: aws
  score: 0.851
  verified: 434 runs

team-aws-coding-055

gemma4 / ollama · 0.849

mantle: team-aws-coding-055
  hardware: dgx-spark
  model: gemma4
  runtime: ollama
  orchestrator: builtin
  provider: aws
  score: 0.849
  verified: 894 runs

team-local-agents-054

qwen3 / lmstudio · 0.846

mantle: team-local-agents-054
  hardware: dgx-spark
  model: qwen3
  runtime: lmstudio
  orchestrator: crewai
  provider: local
  score: 0.846
  verified: 898 runs

team-local-research-056

phi-4 / vllm · 0.846

mantle: team-local-research-056
  hardware: dgx-spark
  model: phi-4
  runtime: vllm
  orchestrator: langgraph
  provider: local
  score: 0.846
  verified: 890 runs

team-aws-qa-057

llama4-scout / lmstudio · 0.843

mantle: team-aws-qa-057
  hardware: dgx-spark
  model: llama4-scout
  runtime: lmstudio
  orchestrator: crewai
  provider: aws
  score: 0.843
  verified: 886 runs

team-local-agents-058

mistral-medium / ollama · 0.840

mantle: team-local-agents-058
  hardware: dgx-spark
  model: mistral-medium
  runtime: ollama
  orchestrator: builtin
  provider: local
  score: 0.840
  verified: 882 runs

prod-local-agents-071

deepseek-r2 / tensorrt · 0.839

mantle: prod-local-agents-071
  hardware: h100-pcie
  model: deepseek-r2
  runtime: tensorrt
  orchestrator: autogen
  provider: local
  score: 0.839
  verified: 2600 runs

prod-gcp-ops-073

command-a / vllm · 0.839

mantle: prod-gcp-ops-073
  hardware: l40s
  model: command-a
  runtime: vllm
  orchestrator: builtin
  provider: gcp
  score: 0.839
  verified: 2592 runs

edge-aws-retrieval-049

llama4-scout / ollama · 0.837

mantle: edge-aws-retrieval-049
  hardware: xeon-max
  model: llama4-scout
  runtime: ollama
  orchestrator: autogen
  provider: aws
  score: 0.837
  verified: 178 runs

prod-aws-retrieval-072

qwen3 / ollama · 0.836

mantle: prod-aws-retrieval-072
  hardware: a100-sxm
  model: qwen3
  runtime: ollama
  orchestrator: langgraph
  provider: aws
  score: 0.836
  verified: 2596 runs

edge-local-ops-050

qwen3 / lmstudio · 0.834

mantle: edge-local-ops-050
  hardware: rtx-5070-ti
  model: qwen3
  runtime: lmstudio
  orchestrator: crewai
  provider: local
  score: 0.834
  verified: 174 runs

team-local-research-064

qwen3 / ollama · 0.829

mantle: team-local-research-064
  hardware: dgx-spark
  model: qwen3
  runtime: ollama
  orchestrator: builtin
  provider: local
  score: 0.829
  verified: 748 runs

team-aws-coding-067

llama4-scout / ollama · 0.827

mantle: team-aws-coding-067
  hardware: dgx-spark
  model: llama4-scout
  runtime: ollama
  orchestrator: builtin
  provider: aws
  score: 0.827
  verified: 626 runs

team-aws-qa-065

gemma4 / vllm · 0.826

mantle: team-aws-qa-065
  hardware: dgx-spark
  model: gemma4
  runtime: vllm
  orchestrator: langgraph
  provider: aws
  score: 0.826
  verified: 634 runs

team-local-research-068

mistral-medium / vllm · 0.824

mantle: team-local-research-068
  hardware: dgx-spark
  model: mistral-medium
  runtime: vllm
  orchestrator: langgraph
  provider: local
  score: 0.824
  verified: 622 runs

team-local-agents-066

phi-4 / lmstudio · 0.823

mantle: team-local-agents-066
  hardware: dgx-spark
  model: phi-4
  runtime: lmstudio
  orchestrator: crewai
  provider: local
  score: 0.823
  verified: 630 runs

prod-gcp-ops-081

deepseek-r2 / ollama · 0.822

mantle: prod-gcp-ops-081
  hardware: l40s
  model: deepseek-r2
  runtime: ollama
  orchestrator: langgraph
  provider: gcp
  score: 0.822
  verified: 2450 runs

prod-azure-data-pipeline-082

qwen3 / vllm · 0.819

mantle: prod-azure-data-pipeline-082
  hardware: rtx-6000-ada
  model: qwen3
  runtime: vllm
  orchestrator: builtin
  provider: azure
  score: 0.819
  verified: 2446 runs

prod-local-agents-083

command-a / tensorrt · 0.816

mantle: prod-local-agents-083
  hardware: h100-pcie
  model: command-a
  runtime: tensorrt
  orchestrator: autogen
  provider: local
  score: 0.816
  verified: 2332 runs

edge-local-support-059

gemma4 / lmstudio · 0.813

mantle: edge-local-support-059
  hardware: epyc-9654
  model: gemma4
  runtime: lmstudio
  orchestrator: crewai
  provider: local
  score: 0.813
  verified: 90 runs

team-local-agents-074

qwen3 / vllm · 0.812

mantle: team-local-agents-074
  hardware: dgx-spark
  model: qwen3
  runtime: vllm
  orchestrator: langgraph
  provider: local
  score: 0.812
  verified: 488 runs

edge-lambda-qa-060

phi-4 / vllm · 0.810

mantle: edge-lambda-qa-060
  hardware: rtx-4080-super
  model: phi-4
  runtime: vllm
  orchestrator: builtin
  provider: lambda
  score: 0.810
  verified: 90 runs

team-aws-coding-075

gemma4 / lmstudio · 0.809

mantle: team-aws-coding-075
  hardware: dgx-spark
  model: gemma4
  runtime: lmstudio
  orchestrator: crewai
  provider: aws
  score: 0.809
  verified: 484 runs

team-local-research-076

phi-4 / ollama · 0.806

mantle: team-local-research-076
  hardware: dgx-spark
  model: phi-4
  runtime: ollama
  orchestrator: builtin
  provider: local
  score: 0.806
  verified: 480 runs

prod-local-agents-091

deepseek-r2 / vllm · 0.805

mantle: prod-local-agents-091
  hardware: h100-pcie
  model: deepseek-r2
  runtime: vllm
  orchestrator: builtin
  provider: local
  score: 0.805
  verified: 2190 runs

team-aws-qa-077

llama4-scout / vllm · 0.803

mantle: team-aws-qa-077
  hardware: dgx-spark
  model: llama4-scout
  runtime: vllm
  orchestrator: langgraph
  provider: aws
  score: 0.803
  verified: 366 runs

prod-aws-retrieval-092

qwen3 / tensorrt · 0.802

mantle: prod-aws-retrieval-092
  hardware: a100-sxm
  model: qwen3
  runtime: tensorrt
  orchestrator: autogen
  provider: aws
  score: 0.802
  verified: 2186 runs

team-local-agents-078

mistral-medium / lmstudio · 0.800

mantle: team-local-agents-078
  hardware: dgx-spark
  model: mistral-medium
  runtime: lmstudio
  orchestrator: crewai
  provider: local
  score: 0.800
  verified: 362 runs

prod-gcp-ops-093

command-a / ollama · 0.799

mantle: prod-gcp-ops-093
  hardware: l40s
  model: command-a
  runtime: ollama
  orchestrator: langgraph
  provider: gcp
  score: 0.799
  verified: 2182 runs

edge-lambda-retrieval-069

llama4-scout / vllm · 0.797

mantle: edge-lambda-retrieval-069
  hardware: l4
  model: llama4-scout
  runtime: vllm
  orchestrator: builtin
  provider: lambda
  score: 0.797
  verified: 90 runs

edge-aws-ops-070

qwen3 / ollama · 0.794

mantle: edge-aws-ops-070
  hardware: xeon-max
  model: qwen3
  runtime: ollama
  orchestrator: autogen
  provider: aws
  score: 0.794
  verified: 90 runs

team-aws-qa-085

gemma4 / ollama · 0.792

mantle: team-aws-qa-085
  hardware: dgx-spark
  model: gemma4
  runtime: ollama
  orchestrator: builtin
  provider: aws
  score: 0.792
  verified: 224 runs

team-local-research-084

qwen3 / lmstudio · 0.789

mantle: team-local-research-084
  hardware: dgx-spark
  model: qwen3
  runtime: lmstudio
  orchestrator: crewai
  provider: local
  score: 0.789
  verified: 228 runs

team-local-agents-086

phi-4 / vllm · 0.789

mantle: team-local-agents-086
  hardware: dgx-spark
  model: phi-4
  runtime: vllm
  orchestrator: langgraph
  provider: local
  score: 0.789
  verified: 220 runs

team-aws-coding-087

llama4-scout / lmstudio · 0.786

mantle: team-aws-coding-087
  hardware: dgx-spark
  model: llama4-scout
  runtime: lmstudio
  orchestrator: crewai
  provider: aws
  score: 0.786
  verified: 220 runs

team-local-research-088

mistral-medium / ollama · 0.783

mantle: team-local-research-088
  hardware: dgx-spark
  model: mistral-medium
  runtime: ollama
  orchestrator: builtin
  provider: local
  score: 0.783
  verified: 220 runs

edge-aws-support-079

gemma4 / ollama · 0.780

mantle: edge-aws-support-079
  hardware: t4
  model: gemma4
  runtime: ollama
  orchestrator: autogen
  provider: aws
  score: 0.780
  verified: 90 runs

edge-local-qa-080

phi-4 / lmstudio · 0.777

mantle: edge-local-qa-080
  hardware: epyc-9654
  model: phi-4
  runtime: lmstudio
  orchestrator: crewai
  provider: local
  score: 0.777
  verified: 90 runs

team-local-agents-094

qwen3 / ollama · 0.772

mantle: team-local-agents-094
  hardware: dgx-spark
  model: qwen3
  runtime: ollama
  orchestrator: builtin
  provider: local
  score: 0.772
  verified: 220 runs

team-aws-qa-097

llama4-scout / ollama · 0.770

mantle: team-aws-qa-097
  hardware: dgx-spark
  model: llama4-scout
  runtime: ollama
  orchestrator: builtin
  provider: aws
  score: 0.770
  verified: 220 runs

team-aws-coding-095

gemma4 / vllm · 0.769

mantle: team-aws-coding-095
  hardware: dgx-spark
  model: gemma4
  runtime: vllm
  orchestrator: langgraph
  provider: aws
  score: 0.769
  verified: 220 runs

team-local-agents-098

mistral-medium / vllm · 0.767

mantle: team-local-agents-098
  hardware: dgx-spark
  model: mistral-medium
  runtime: vllm
  orchestrator: langgraph
  provider: local
  score: 0.767
  verified: 220 runs

team-local-research-096

phi-4 / lmstudio · 0.766

mantle: team-local-research-096
  hardware: dgx-spark
  model: phi-4
  runtime: lmstudio
  orchestrator: crewai
  provider: local
  score: 0.766
  verified: 220 runs

edge-local-retrieval-089

llama4-scout / lmstudio · 0.756

mantle: edge-local-retrieval-089
  hardware: rtx-5080
  model: llama4-scout
  runtime: lmstudio
  orchestrator: crewai
  provider: local
  score: 0.756
  verified: 90 runs

edge-lambda-ops-090

qwen3 / vllm · 0.753

mantle: edge-lambda-ops-090
  hardware: l4
  model: qwen3
  runtime: vllm
  orchestrator: builtin
  provider: lambda
  score: 0.753
  verified: 90 runs

edge-lambda-support-099

gemma4 / vllm · 0.740

mantle: edge-lambda-support-099
  hardware: rtx-5070-ti
  model: gemma4
  runtime: vllm
  orchestrator: builtin
  provider: lambda
  score: 0.740
  verified: 90 runs

edge-aws-qa-100

phi-4 / ollama · 0.737

mantle: edge-aws-qa-100
  hardware: t4
  model: phi-4
  runtime: ollama
  orchestrator: autogen
  provider: aws
  score: 0.737
  verified: 90 runs
Looking for setup details? See docs.