{
  "openapi": "3.1.0",
  "info": {
    "title": "Runware API - FLUX.1 [dev] SRPO",
    "summary": "FLUX.1 SRPO dev for high realism text to image",
    "description": "FLUX.1 [dev] SRPO is a 12B flow transformer finetuned with Tencent SRPO for higher realism and aesthetics in text guided image generation. It improves lighting, texture, and artifact control. Ideal for teams that need controllable, high quality image output from text prompts.",
    "version": "1.0.0",
    "x-model-id": "flux-1-dev-srpo",
    "x-air-id": "runware:111@1",
    "x-status": "live",
    "x-capabilities": [
      "text-to-image"
    ],
    "x-released-at": "2025-09-12T00:00:00Z",
    "x-cover-image": "https://assets.runware.ai/201104f8-8bbc-496d-9083-1fac91ac6618.jpg"
  },
  "servers": [
    {
      "url": "https://api.runware.ai/v1",
      "description": "Runware REST API"
    }
  ],
  "components": {
    "securitySchemes": {
      "apiKeyAuth": {
        "type": "http",
        "scheme": "bearer",
        "description": "Runware API Key (e.g., Bearer <your-key>)"
      }
    },
    "schemas": {
      "AuthenticationTask": {
        "title": "Authentication",
        "description": "Authenticates a connection using an API key. Can be sent as the first element of the request array as an alternative to using the Authorization header.",
        "type": "object",
        "x-response-schema": "https://schemas.runware.ai/responses/utilities/authentication.json",
        "properties": {
          "taskType": {
            "const": "authentication",
            "title": "Task Type",
            "description": "The type of task to perform."
          },
          "apiKey": {
            "title": "API Key",
            "description": "Your Runware API key.",
            "type": "string"
          },
          "connectionSessionUUID": {
            "title": "Connection Session UUID",
            "description": "Optional session UUID to resume a previous connection and receive any buffered results.",
            "type": "string",
            "format": "uuid"
          }
        },
        "required": [
          "taskType",
          "apiKey"
        ],
        "additionalProperties": false
      },
      "RequestBody": {
        "type": "array",
        "items": {
          "type": "object",
          "required": [
            "positivePrompt",
            "taskType",
            "taskUUID",
            "model"
          ],
          "allOf": [
            {
              "dependentRequired": {
                "width": [
                  "height"
                ],
                "height": [
                  "width"
                ]
              }
            }
          ],
          "additionalProperties": false,
          "properties": {
            "inputs": {
              "title": "Inputs",
              "description": "The unified payload wrapper for complex media assets dictating image, video or audio inference constraints.",
              "type": "object",
              "properties": {
                "seedImage": {
                  "title": "Seed Image",
                  "description": "Image used as a starting point for the generation (UUID, URL, Data URI, or Base64).",
                  "type": "string",
                  "anyOf": [
                    {
                      "format": "uuid"
                    },
                    {
                      "format": "uri"
                    },
                    {
                      "pattern": "^data:image\\/[a-zA-Z]+;base64,[a-zA-Z0-9+/=]+$"
                    },
                    {
                      "pattern": "^[a-zA-Z0-9+/=]+$"
                    }
                  ]
                },
                "maskImage": {
                  "title": "Mask Image",
                  "description": "Image used to specify which areas of the seed image should be edited (UUID, URL, Data URI, or Base64).",
                  "type": "string",
                  "anyOf": [
                    {
                      "format": "uuid"
                    },
                    {
                      "format": "uri"
                    },
                    {
                      "pattern": "^data:image\\/[a-zA-Z]+;base64,[a-zA-Z0-9+/=]+$"
                    },
                    {
                      "pattern": "^[a-zA-Z0-9+/=]+$"
                    }
                  ]
                }
              },
              "additionalProperties": false
            },
            "positivePrompt": {
              "title": "Positive Prompt",
              "description": "Text prompt describing elements to include in the generated output.",
              "type": "string",
              "minLength": 2,
              "maxLength": 3000
            },
            "negativePrompt": {
              "title": "Negative Prompt",
              "description": "Prompt to guide what to exclude from generation. Ignored when guidance is disabled (CFGScale ≤ 1).",
              "type": "string",
              "minLength": 2,
              "maxLength": 3000
            },
            "width": {
              "title": "Width",
              "description": "Width of the generated media in pixels.",
              "type": "integer",
              "minimum": 128,
              "maximum": 2048,
              "multipleOf": 16
            },
            "height": {
              "title": "Height",
              "description": "Height of the generated media in pixels.",
              "type": "integer",
              "minimum": 128,
              "maximum": 2048,
              "multipleOf": 16
            },
            "seed": {
              "title": "Seed",
              "description": "Random seed for reproducible generation. When not provided, a random seed is generated in the unsigned 32-bit range.",
              "type": "integer",
              "minimum": 0,
              "maximum": 9223372036854776000
            },
            "steps": {
              "title": "Steps",
              "description": "Total number of denoising steps. Higher values generally produce more detailed results but take longer.",
              "type": "integer",
              "minimum": 1,
              "maximum": 50
            },
            "scheduler": {
              "title": "Scheduler",
              "description": "Scheduler to use for the diffusion process.",
              "type": "string",
              "enum": [
                "DDIM",
                "DDIMScheduler",
                "DDPMScheduler",
                "DEISMultistepScheduler",
                "Default",
                "DPM++",
                "DPM++ 2M",
                "DPM++ 2M Beta",
                "DPM++ 2M Exponential",
                "DPM++ 2M Karras",
                "DPM++ 2M SDE",
                "DPM++ 2M SDE Beta",
                "DPM++ 2M SDE Exponential",
                "DPM++ 2M SDE Karras",
                "DPM++ 2M SDE Uniform",
                "DPM++ 2M Uniform",
                "DPM++ 3M",
                "DPM++ 3M Beta",
                "DPM++ 3M Exponential",
                "DPM++ 3M Karras",
                "DPM++ 3M SDE Uniform",
                "DPM++ 3M Uniform",
                "DPM++ Beta",
                "DPM++ Exponential",
                "DPM++ Karras",
                "DPM++ SDE",
                "DPM++ SDE Beta",
                "DPM++ SDE Exponential",
                "DPM++ SDE Karras",
                "DPM++ Uniform",
                "DPM++ Uniform Beta",
                "DPM++ Uniform Exponential",
                "DPM++ Uniform Karras",
                "DPMSolverMultistepInverse",
                "DPMSolverMultistepScheduler",
                "DPMSolverSinglestepScheduler",
                "EDMDPMSolverMultistepScheduler",
                "EDMEulerScheduler",
                "Euler",
                "Euler a",
                "Euler Beta",
                "Euler DiscreteScheduler",
                "Euler Exponential",
                "Euler Karras",
                "EulerAncestralDiscreteScheduler",
                "FlowMatchEulerDiscreteScheduler",
                "Heun",
                "HeunDiscreteScheduler",
                "Heun Karras",
                "IPNDMScheduler",
                "IPNDM Uniform",
                "IPNDM Uniform Beta",
                "IPNDM Uniform Exponential",
                "IPNDM Uniform Karras",
                "KDPM2AncestralDiscreteScheduler",
                "KDPM2DiscreteScheduler",
                "LCM",
                "LCMScheduler",
                "LMS",
                "LMSDiscreteScheduler",
                "LMS Karras",
                "PNDMScheduler",
                "TCDScheduler",
                "UniPC",
                "UniPC 2M",
                "UniPC 2M Karras",
                "UniPC 2M Uniform",
                "UniPC 3M",
                "UniPC 3M Karras",
                "UniPC 3M Uniform",
                "UniPC Karras",
                "UniPC Uniform",
                "UniPC Uniform Beta",
                "UniPC Uniform Exponential",
                "UniPC Uniform Karras"
              ]
            },
            "CFGScale": {
              "title": "CFG Scale",
              "description": "Guidance scale representing how closely the output will resemble the prompt. Higher values produce results more aligned with the prompt.",
              "type": "number",
              "multipleOf": 0.01,
              "minimum": 0,
              "maximum": 20
            },
            "strength": {
              "title": "Strength",
              "description": "Strength of the transformation. Lower values result in more influence from the original input.",
              "type": "number",
              "multipleOf": 0.01,
              "minimum": 0,
              "maximum": 1,
              "default": 0.8
            },
            "maskMargin": {
              "title": "Mask Margin",
              "description": "Extra context pixels around the masked region during inpainting. The model zooms into the masked area with these additional pixels for better integration.",
              "type": "integer",
              "minimum": 32,
              "maximum": 128
            },
            "promptWeighting": {
              "title": "Prompt Weighting",
              "description": "Defines the syntax to be used for prompt weighting.\n\nPrompt weighting allows you to adjust how strongly different parts of your prompt influence the generated image. Choose between `compel` notation with advanced weighting operations or `sdEmbeds` for simple emphasis adjustments.\n\n**Compel syntax**:\n\nAdds 0.2 seconds to image inference time and incurs additional costs.\n\nWhen `compel` syntax is selected, you can use the following notation in prompts:\n\n**Weighting**\n\nSyntax: `+` `-` `(word)0.9`\n\nIncrease or decrease the attention given to specific words or phrases.\n\nExamples:\n\n- Single words: `small+ dog, pixar style`\n- Multiple words: `small dog, (pixar style)-`\n- Multiple symbols for more effect: `small+++ dog, pixar style`\n- Nested weighting: `(small+ dog)++, pixar style`\n- Explicit weight percentage: `small dog, (pixar)1.2 style`\n\n**Blend**\n\nSyntax: `.blend()`\n\nMerge multiple conditioning prompts.\n\nExample: `(\"small dog\", \"robot\").blend(1, 0.8)`\n\n**Conjunction**\n\nSyntax: `.and()`\n\nBreak a prompt into multiple clauses and pass them separately.\n\nExample: `(\"small dog\", \"pixar style\").and()`\n\n**Sdembeds syntax**:\n\nWhen `sdEmbeds` syntax is selected, you can use the following notation in prompts:\n\n**Weighting**\n\nSyntax: `(text)` `(text:number)` `[text]`\n\nUse parentheses `()` to increase attention, square brackets `[]` to decrease it. Add a number after the text to specify a custom multiplier.\n\nExamples:\n\n- Single words: `(small) dog, pixar style`\n- Multiple words: `small dog, [pixar style]`\n- Higher emphasis: `(small:2.5) dog, pixar style`\n- Combined emphasis: `(small dog:1.5), pixar style`\n\n[Read full documentation](https://runware.ai/docs/models/flux-1-dev-srpo#promptWeighting)",
              "type": "string",
              "enum": [
                "compel",
                "sdEmbeds"
              ]
            },
            "acceleratorOptions": {
              "title": "Accelerator Options",
              "description": "Advanced caching mechanisms to speed up generation.",
              "type": "object",
              "properties": {
                "cacheEndStep": {
                  "title": "Cache End Step",
                  "description": "Absolute step number to end caching. Must be greater than `cacheStartStep` and less than or equal to `steps`.",
                  "type": "integer",
                  "minimum": 1
                },
                "cacheEndStepPercentage": {
                  "title": "Cache End Step Percentage",
                  "description": "Percentage of steps to end caching. Alternative to `cacheEndStep`. Must be greater than `cacheStartStepPercentage`.",
                  "type": "integer",
                  "minimum": 1,
                  "maximum": 100
                },
                "cacheMaxConsecutiveSteps": {
                  "title": "Cache Max Consecutive Steps",
                  "description": "Limits the maximum number of consecutive steps that can use cached computations before forcing a fresh computation.",
                  "type": "integer",
                  "minimum": 1,
                  "maximum": 5,
                  "default": 3
                },
                "cacheStartStep": {
                  "title": "Cache Start Step",
                  "description": "Absolute step number to start caching. Must be less than `cacheEndStep`.",
                  "type": "integer",
                  "minimum": 0
                },
                "cacheStartStepPercentage": {
                  "title": "Cache Start Step Percentage",
                  "description": "Percentage of steps to start caching. Alternative to `cacheStartStep`. Must be less than `cacheEndStepPercentage`.",
                  "type": "integer",
                  "minimum": 0,
                  "maximum": 99
                },
                "fbCache": {
                  "title": "FB Cache",
                  "description": "First Block Cache (FBCache) acceleration. Reuses feature block computations across steps.",
                  "type": "boolean",
                  "default": false
                },
                "fbCacheThreshold": {
                  "title": "FB Cache Threshold",
                  "description": "Controls the sensitivity threshold for determining when to reuse cached computations. Lower values reuse more aggressively.",
                  "type": "number",
                  "multipleOf": 0.01,
                  "minimum": 0,
                  "maximum": 1,
                  "default": 0.25
                },
                "teaCache": {
                  "title": "TeaCache",
                  "description": "TeaCache acceleration for transformer-based models. Estimates step differences to skip redundant computations.",
                  "type": "boolean",
                  "default": false
                },
                "teaCacheDistance": {
                  "title": "TeaCache Distance",
                  "description": "Controls the aggressiveness of the TeaCache feature. Lower values prioritize quality, higher values prioritize speed.",
                  "type": "number",
                  "multipleOf": 0.01,
                  "minimum": 0,
                  "maximum": 1,
                  "default": 0.5
                },
                "dbCache": {
                  "title": "DB Cache",
                  "description": "DB Cache (CacheDiT) acceleration. Caches and reuses intermediate transformer block outputs to skip redundant computations.",
                  "type": "boolean",
                  "default": false
                },
                "dbCacheThreshold": {
                  "title": "DB Cache Threshold",
                  "description": "Controls the sensitivity threshold for DB Cache. Lower values reuse cached blocks more aggressively, higher values prioritize quality.",
                  "type": "number",
                  "multipleOf": 0.01,
                  "minimum": 0,
                  "maximum": 1,
                  "default": 0.25
                },
                "dbCacheSkipInterval": {
                  "title": "DB Cache Skip Interval",
                  "description": "Controls how many steps to skip between cache refreshes. Higher values skip more steps for faster generation at the cost of quality.",
                  "type": "integer",
                  "minimum": 1,
                  "default": 5
                }
              },
              "allOf": [
                {
                  "not": {
                    "required": [
                      "cacheStartStep",
                      "cacheStartStepPercentage"
                    ]
                  }
                },
                {
                  "not": {
                    "required": [
                      "cacheEndStep",
                      "cacheEndStepPercentage"
                    ]
                  }
                }
              ],
              "additionalProperties": false
            },
            "outpaint": {
              "title": "Outpaint",
              "description": "Extends image boundaries in specified directions. Final width/height must account for original image plus extensions.",
              "type": "object",
              "properties": {
                "bottom": {
                  "title": "Outpaint Bottom",
                  "description": "Number of pixels to extend to the bottom.",
                  "type": "integer",
                  "minimum": 0
                },
                "left": {
                  "title": "Outpaint Left",
                  "description": "Number of pixels to extend to the left.",
                  "type": "integer",
                  "minimum": 0
                },
                "right": {
                  "title": "Outpaint Right",
                  "description": "Number of pixels to extend to the right.",
                  "type": "integer",
                  "minimum": 0
                },
                "top": {
                  "title": "Outpaint Top",
                  "description": "Number of pixels to extend to the top.",
                  "type": "integer",
                  "minimum": 0
                }
              },
              "additionalProperties": false
            },
            "lora": {
              "title": "LoRA",
              "description": "With LoRA (Low-Rank Adaptation), you can adapt a model to specific styles or features by emphasizing particular aspects of the data. This technique enhances the quality and relevance of generated content and can be especially useful when the output needs to adhere to a specific artistic style or follow particular guidelines.\n\nMultiple LoRA models can be used simultaneously to achieve different adaptation goals.\n\n**Examples**:\n\n```json\n\"lora\": [\n  {\n    \"model\": \"<lora-model-air>\",\n    \"weight\": 0.8\n  }\n]\n```\n\n[Read full documentation](https://runware.ai/docs/models/flux-1-dev-srpo#lora)",
              "type": "array",
              "minItems": 1,
              "items": {
                "type": "object",
                "properties": {
                  "model": {
                    "title": "LoRA Model",
                    "description": "LoRA model identifier.",
                    "type": "string",
                    "pattern": "^[a-zA-Z0-9._-]+(:[a-zA-Z0-9._/@-]+)?$"
                  },
                  "weight": {
                    "title": "LoRA Weight",
                    "description": "Strength of the LoRA influence. A value of 0 means no influence. Higher values increase the influence, and negative values can be used to steer away from the LoRA's style.",
                    "type": "number",
                    "multipleOf": 0.01,
                    "minimum": -4,
                    "maximum": 4,
                    "default": 1
                  },
                  "transformer": {
                    "title": "LoRA Transformer",
                    "description": "Transformer stages to apply LoRA. Some video models use separate high-noise and low-noise processing stages, and LoRAs can be selectively applied to optimize their effectiveness.",
                    "type": "string",
                    "oneOf": [
                      {
                        "const": "high",
                        "title": "High",
                        "description": "Apply LoRA only to the high-noise processing stage (coarse structure and early generation steps)."
                      },
                      {
                        "const": "low",
                        "title": "Low",
                        "description": "Apply LoRA only to the low-noise processing stage (fine details and later generation steps)."
                      },
                      {
                        "const": "both",
                        "title": "Both",
                        "description": "Apply LoRA to both stages for full coverage."
                      }
                    ],
                    "default": "both"
                  }
                },
                "required": [
                  "model"
                ],
                "additionalProperties": false
              }
            },
            "controlNet": {
              "title": "ControlNet",
              "description": "With ControlNet, you can provide a guide image to help the model generate images that align with the desired structure. This guide image can be generated with our ControlNet preprocessing tool, extracting guidance information from an input image. The guide image can be in the form of an edge map, a pose, a depth estimation or any other type of control image that guides the generation process via the ControlNet model.\n\nMultiple ControlNet models can be used at the same time to provide different types of guidance information to the model.\n\n**Examples**:\n\n```json\n\"controlNet\": [\n  {\n    \"model\": \"<controlnet-model-air>\",\n    \"guideImage\": \"c64351d5-4c59-42f7-95e1-eace013eddab\",\n    \"weight\": 0.7,\n    \"startStep\": 0,\n    \"endStep\": 20,\n    \"controlMode\": \"controlnet\"\n  }\n]\n```\n\n[Read full documentation](https://runware.ai/docs/models/flux-1-dev-srpo#controlNet)",
              "type": "array",
              "minItems": 1,
              "items": {
                "type": "object",
                "properties": {
                  "model": {
                    "title": "ControlNet Model",
                    "description": "ControlNet model identifier.",
                    "type": "string",
                    "pattern": "^[a-zA-Z0-9._-]+(:[a-zA-Z0-9._/@-]+)?$"
                  },
                  "weight": {
                    "title": "ControlNet Weight",
                    "description": "Strength of the ControlNet influence. A value of 0 means no influence. Higher values increase the influence, and negative values can be used to steer away from the guide image.",
                    "type": "number",
                    "multipleOf": 0.01,
                    "minimum": -4,
                    "maximum": 4,
                    "default": 1
                  },
                  "guideImage": {
                    "title": "Guide Image",
                    "description": "Reference image for ControlNet guidance (UUID, URL, Data URI, or Base64).",
                    "type": "string",
                    "anyOf": [
                      {
                        "format": "uuid"
                      },
                      {
                        "format": "uri"
                      },
                      {
                        "pattern": "^data:image\\/[a-zA-Z]+;base64,[a-zA-Z0-9+/=]+$"
                      },
                      {
                        "pattern": "^[a-zA-Z0-9+/=]+$"
                      }
                    ]
                  },
                  "controlMode": {
                    "title": "Control Mode",
                    "description": "ControlNet guidance mode.",
                    "type": "string",
                    "default": "balanced",
                    "oneOf": [
                      {
                        "const": "balanced",
                        "description": "Equal weight between ControlNet and prompt."
                      },
                      {
                        "const": "controlnet",
                        "description": "Prioritize ControlNet guidance."
                      },
                      {
                        "const": "prompt",
                        "description": "Prioritize prompt guidance."
                      }
                    ]
                  },
                  "endStep": {
                    "title": "ControlNet End Step",
                    "description": "Absolute step number to end ControlNet influence. Must be greater than `startStep` and less than or equal to `steps`.",
                    "type": "integer",
                    "minimum": 1
                  },
                  "endStepPercentage": {
                    "title": "End Step Percentage",
                    "description": "Percentage of steps to end ControlNet influence. Must be greater than `startStepPercentage`.",
                    "type": "integer",
                    "minimum": 1,
                    "maximum": 100
                  },
                  "startStep": {
                    "title": "Start Step",
                    "description": "Absolute step number to start ControlNet influence. Must be less than `endStep`.",
                    "type": "integer",
                    "minimum": 0
                  },
                  "startStepPercentage": {
                    "title": "Start Step Percentage",
                    "description": "Percentage of steps to start ControlNet influence. Must be less than `endStepPercentage`.",
                    "type": "integer",
                    "minimum": 0,
                    "maximum": 99
                  }
                },
                "required": [
                  "model",
                  "guideImage"
                ],
                "allOf": [
                  {
                    "not": {
                      "required": [
                        "startStep",
                        "startStepPercentage"
                      ]
                    }
                  },
                  {
                    "not": {
                      "required": [
                        "endStep",
                        "endStepPercentage"
                      ]
                    }
                  }
                ],
                "additionalProperties": false
              }
            },
            "ipAdapters": {
              "title": "IP Adapters",
              "description": "IP-Adapters enable image-prompted generation, allowing you to use reference images to guide the style and content of your generations. Multiple IP Adapters can be used simultaneously.\n\n**Examples**:\n\n```json\n\"ipAdapters\": [\n  {\n    \"model\": \"<ip-adapter-model-air>\",\n    \"guideImages\": [\"c64351d5-4c59-42f7-95e1-eace013eddab\"],\n    \"weight\": 0.75\n  },\n  {\n    \"model\": \"<ip-adapter-model-air>\",\n    \"guideImages\": [\"d7e8f9a0-2b5c-4e7f-a1d3-9c8b7a6e5d4f\"],\n    \"weight\": 0.5\n  }\n]\n```\n\n[Read full documentation](https://runware.ai/docs/models/flux-1-dev-srpo#ipAdapters)",
              "type": "array",
              "minItems": 1,
              "items": {
                "type": "object",
                "properties": {
                  "model": {
                    "title": "IP-Adapter Model",
                    "description": "We make use of the [AIR system](https://runware.ai/models) to identify IP-Adapter models. This identifier is a unique string that represents a specific model.\n\n**Supported models list**:\n\n| AIR ID | Model Name |\n| --- | --- |\n| runware:55@1 | IP Adapter SDXL |\n| runware:55@2 | IP Adapter SDXL Plus |\n| runware:55@3 | IP Adapter SDXL Plus Face |\n| runware:55@4 | IP Adapter SDXL Vit-H |\n| runware:55@5 | IP Adapter SD 1.5 |\n| runware:55@6 | IP Adapter SD 1.5 Plus |\n| runware:55@7 | IP Adapter SD 1.5 Light |\n| runware:55@8 | IP Adapter SD 1.5 Plus Face |\n| runware:55@10 | IP Adapter SD 1.5 Vit-G |\n\n[Read full documentation](https://runware.ai/docs/models/flux-1-dev-srpo#model)",
                    "type": "string",
                    "pattern": "^[a-zA-Z0-9._-]+(:[a-zA-Z0-9._/@-]+)?$"
                  },
                  "weight": {
                    "title": "IP-Adapter Weight",
                    "description": "Strength of the IP-Adapter influence. A value of 0 means no influence. Higher values increase the influence, and negative values can be used to steer away from the reference.",
                    "type": "number",
                    "multipleOf": 0.01,
                    "minimum": -4,
                    "maximum": 4,
                    "default": 1
                  },
                  "guideImages": {
                    "title": "Guide Images",
                    "description": "Images to guide the IP-Adapter (UUID, URL, Data URI, or Base64).",
                    "type": "array",
                    "minItems": 1,
                    "items": {
                      "title": "Image",
                      "description": "Image input (UUID, URL, Data URI, or Base64).",
                      "type": "string",
                      "anyOf": [
                        {
                          "format": "uuid"
                        },
                        {
                          "format": "uri"
                        },
                        {
                          "pattern": "^data:image\\/[a-zA-Z]+;base64,[a-zA-Z0-9+/=]+$"
                        },
                        {
                          "pattern": "^[a-zA-Z0-9+/=]+$"
                        }
                      ]
                    }
                  },
                  "combineMethod": {
                    "title": "Combine Method",
                    "description": "Controls how multiple reference images are combined.",
                    "type": "string",
                    "enum": [
                      "concat",
                      "add",
                      "subtract",
                      "average",
                      "norm_average"
                    ],
                    "default": "concat"
                  },
                  "embedScaling": {
                    "title": "Embed Scaling",
                    "description": "Determines which embedding components are used and their strength.",
                    "type": "string",
                    "enum": [
                      "only_v",
                      "kv",
                      "kv_penalty_c",
                      "k_mean_v_penalty_c"
                    ],
                    "default": "kv"
                  },
                  "weightType": {
                    "title": "Weight Type",
                    "description": "Shapes how influence evolves during generation.",
                    "type": "string",
                    "enum": [
                      "normal",
                      "ease_in",
                      "ease_out",
                      "ease_in_out",
                      "weak_input",
                      "weak_output",
                      "weak_middle",
                      "strong_middle",
                      "style_transfer",
                      "composition",
                      "strong_style_transfer",
                      "style_and_composition",
                      "strong_style_and_composition"
                    ],
                    "default": "normal"
                  },
                  "weightComposition": {
                    "title": "Weight Composition",
                    "description": "Controls composition/layout influence specifically.",
                    "type": "number",
                    "multipleOf": 0.01,
                    "minimum": 0,
                    "maximum": 1
                  }
                },
                "required": [
                  "model",
                  "guideImages"
                ],
                "additionalProperties": false
              }
            },
            "acePlusPlus": {
              "title": "ACE++",
              "description": "ACE++ is an advanced framework for character-consistent image generation and editing. It supports two distinct workflows: creating new images guided by a reference image, and editing existing images with precise control over specific regions.\n\nNote: When using the `acePlusPlus` object, you must set the [model](#request-model) parameter to `runware:102@1` (FLUX Fill).\n\n> [!NOTE]\n> The `masks` parameter is required when `type` is set to `local_editing`, and is not allowed for other types (`portrait`, `subject`). This determines the workflow: creation types use only reference images, while `local_editing` requires explicit masks to define the edit region.\n\n**Examples**:\n\n**Creation Workflow:** Generate new images that maintain the style, identity, or characteristics from a reference image. The model extracts visual features from the reference image and combines them with the text prompt to condition the generation process.\n\n```json\n\"acePlusPlus\": {\n  \"type\": \"portrait\",\n  \"images\": [\"59a2edc2-45e6-429f-be5f-7ded59b92046\"],\n  \"repaintingScale\": 0.5\n}\n```\n\n**Editing Workflow:** Modify specific regions of an existing image using guidance from a reference image. Uses an input mask to define the exact area to be edited while preserving the rest of the image unchanged.\n\n```json\n\"referenceImages\": [\"59a2edc2-45e6-429f-be5f-7ded59b92046\"],\n\"acePlusPlus\": {\n  \"type\": \"local_editing\",\n  \"images\": [\"59a2edc2-45e6-429f-be5f-7ded59b92046\"],\n  \"masks\": [\"90422a52-f186-4bf4-a73b-0a46016a8330\"],\n  \"repaintingScale\": 0.7\n}\n```\n\n[Read full documentation](https://runware.ai/docs/models/flux-1-dev-srpo#acePlusPlus)",
              "type": "object",
              "properties": {
                "images": {
                  "title": "Images",
                  "description": "Reference images for character identity preservation (UUID, URL, Data URI, or Base64).",
                  "type": "array",
                  "minItems": 1,
                  "maxItems": 1,
                  "items": {
                    "title": "Image",
                    "description": "Image input (UUID, URL, Data URI, or Base64).",
                    "type": "string",
                    "anyOf": [
                      {
                        "format": "uuid"
                      },
                      {
                        "format": "uri"
                      },
                      {
                        "pattern": "^data:image\\/[a-zA-Z]+;base64,[a-zA-Z0-9+/=]+$"
                      },
                      {
                        "pattern": "^[a-zA-Z0-9+/=]+$"
                      }
                    ]
                  }
                },
                "masks": {
                  "title": "Masks",
                  "description": "Mask images for selective editing (UUID, URL, Data URI, or Base64).",
                  "type": "array",
                  "minItems": 1,
                  "maxItems": 1,
                  "items": {
                    "title": "Image",
                    "description": "Image input (UUID, URL, Data URI, or Base64).",
                    "type": "string",
                    "anyOf": [
                      {
                        "format": "uuid"
                      },
                      {
                        "format": "uri"
                      },
                      {
                        "pattern": "^data:image\\/[a-zA-Z]+;base64,[a-zA-Z0-9+/=]+$"
                      },
                      {
                        "pattern": "^[a-zA-Z0-9+/=]+$"
                      }
                    ]
                  }
                },
                "repaintingScale": {
                  "title": "Repainting Scale",
                  "description": "Balances original character identity (0.0) vs prompt adherence (1.0). Lower values preserve stronger character resemblance.",
                  "type": "number",
                  "multipleOf": 0.01,
                  "minimum": 0,
                  "maximum": 1,
                  "default": 0
                },
                "type": {
                  "title": "ACE++ Type",
                  "description": "Task mode for ACE++ generation.",
                  "type": "string",
                  "default": "portrait",
                  "oneOf": [
                    {
                      "const": "portrait",
                      "description": "Face consistency."
                    },
                    {
                      "const": "subject",
                      "description": "Object consistency."
                    },
                    {
                      "const": "local_editing",
                      "description": "Region modification."
                    }
                  ]
                }
              },
              "required": [
                "images"
              ],
              "if": {
                "properties": {
                  "type": {
                    "const": "local_editing"
                  }
                }
              },
              "then": {
                "required": [
                  "masks"
                ]
              },
              "else": {
                "properties": {
                  "masks": false
                }
              },
              "additionalProperties": false
            },
            "hiresFix": {
              "title": "HiresFix",
              "description": "Two-stage generation for improved resolution and detail. The model generates at a lower resolution first, then upscales and refines the result in a second pass. Can be enabled with `true` for default settings, or configured as an object for fine-grained control over the upscaling model, steps, and strength.\n\nWhen using the object form, the `model` parameter is required. Available upscaling models:\n\n| Model | Name | Upscale Factor |\n| --- | --- | --- |\n| `runware:504@1` | RealESRGAN\\_x4plus | 4x |\n| `runware:realesrgan@anime-6b` | RealESRGAN\\_x4plus\\_anime\\_6B | 4x |\n| `runware:esrgan@animesharp` | 4x-AnimeSharp | 4x |\n| `runware:esrgan@ultrasharp` | 4x-UltraSharp | 4x |\n\n**Simple (boolean)**:\n\n```json\n\"hiresFix\": true\n```\n\n**Configured (object)**:\n\n```json\n\"hiresFix\": {\n  \"model\": \"runware:esrgan@ultrasharp\",\n  \"steps\": 15,\n  \"strength\": 0.6\n}\n```\n\n[Read full documentation](https://runware.ai/docs/models/flux-1-dev-srpo#hiresFix)",
              "oneOf": [
                {
                  "type": "boolean",
                  "const": true
                },
                {
                  "type": "object",
                  "properties": {
                    "model": {
                      "title": "Model",
                      "description": "The upscaling model to use for hires fix.",
                      "type": "string",
                      "pattern": "^[a-zA-Z0-9._-]+(:[a-zA-Z0-9._/@-]+)?$",
                      "enum": [
                        "runware:realesrgan@anime-6b",
                        "runware:esrgan@animesharp",
                        "runware:esrgan@ultrasharp",
                        "runware:504@1"
                      ]
                    },
                    "steps": {
                      "title": "Steps",
                      "description": "Total number of denoising steps. Higher values generally produce more detailed results but take longer.",
                      "type": "integer",
                      "minimum": 1,
                      "maximum": 35,
                      "default": 10
                    },
                    "strength": {
                      "title": "Strength",
                      "description": "Strength of the transformation. Lower values result in more influence from the original input.",
                      "type": "number",
                      "multipleOf": 0.01,
                      "minimum": 0,
                      "maximum": 1,
                      "default": 0.8
                    },
                    "upscaleFactor": {
                      "title": "Upscale Factor",
                      "description": "Factor by which to upscale the generated image. A value of 2 doubles width and height.",
                      "type": "integer",
                      "enum": [
                        4
                      ],
                      "default": 4
                    }
                  },
                  "required": [
                    "model"
                  ],
                  "additionalProperties": false
                }
              ]
            },
            "layerDiffuse": {
              "title": "Layer Diffuse",
              "description": "Enables LayerDiffuse technology, which allows for the direct generation of images with transparency (alpha channels).\n\nWhen enabled, this feature applies the necessary LoRA and VAE components to produce high-quality transparent images without requiring post-processing background removal.\n\nThis is particularly useful for creating product images, overlays, composites, and other content that requires transparency. The output must be in a format that supports transparency, such as PNG.\n\nNote: This feature is only available for the FLUX model architecture. It automatically applies the equivalent of:\n\n```json\n\n  \"lora\": [{ \"model\": \"runware:120@2\" }],\n  \"vae\": \"runware:120@4\"\n```\n\n**Examples**:\n\n```json\n\"outputFormat\": \"png\",\n\"advancedFeatures\": {\n  \"layerDiffuse\": true\n}\n```\n\n[Read full documentation](https://runware.ai/docs/models/flux-1-dev-srpo#layerDiffuse)",
              "type": "boolean",
              "default": false
            },
            "pulid": {
              "title": "PuLID",
              "description": "PuLID (Pure and Lightning ID Customization) enables fast and high-quality identity customization for text-to-image generation. This object allows you to configure settings for transferring facial characteristics from a reference image to generated images with high fidelity.\n\n> [!NOTE]\n> `CFGstartStep` and `CFGstartStepPercentage` are mutually exclusive: they both control when CFG guidance begins, but as an absolute step number or a percentage respectively. Use one or the other, not both.\n\n**Examples**:\n\n```json\n\"pulid\": {\n  \"images\": [\"59a2edc2-45e6-429f-be5f-7ded59b92046\"],\n  \"idWeight\": 1,\n  \"trueCFGScale\": 1.5,\n  \"CFGstartStep\": 3\n}\n```\n\n[Read full documentation](https://runware.ai/docs/models/flux-1-dev-srpo#pulid)",
              "type": "object",
              "properties": {
                "CFGstartStep": {
                  "title": "CFG Start Step",
                  "description": "Absolute step number to start identity influence. Must be less than `steps`.",
                  "type": "integer",
                  "minimum": 0
                },
                "CFGstartStepPercentage": {
                  "title": "CFG Start Step Percentage",
                  "description": "Percentage of steps to start identity influence.",
                  "type": "integer",
                  "minimum": 0,
                  "maximum": 99
                },
                "idWeight": {
                  "title": "ID Weight",
                  "description": "Identity preservation strength. Higher values create closer resemblance to the reference face.",
                  "type": "number",
                  "multipleOf": 0.01,
                  "minimum": 0,
                  "maximum": 3,
                  "default": 1
                },
                "images": {
                  "title": "Images",
                  "description": "Reference images for identity customization (UUID, URL, Data URI, or Base64).",
                  "type": "array",
                  "minItems": 1,
                  "maxItems": 1,
                  "items": {
                    "title": "Image",
                    "description": "Image input (UUID, URL, Data URI, or Base64).",
                    "type": "string",
                    "anyOf": [
                      {
                        "format": "uuid"
                      },
                      {
                        "format": "uri"
                      },
                      {
                        "pattern": "^data:image\\/[a-zA-Z]+;base64,[a-zA-Z0-9+/=]+$"
                      },
                      {
                        "pattern": "^[a-zA-Z0-9+/=]+$"
                      }
                    ]
                  }
                },
                "trueCFGScale": {
                  "title": "True CFG Scale",
                  "description": "Guidance scale for identity embedding.",
                  "type": "number",
                  "multipleOf": 0.1,
                  "minimum": 0,
                  "maximum": 10
                }
              },
              "required": [
                "images"
              ],
              "allOf": [
                {
                  "not": {
                    "required": [
                      "CFGstartStep",
                      "CFGstartStepPercentage"
                    ]
                  }
                }
              ],
              "additionalProperties": false
            },
            "trueCFGScale": {
              "title": "True CFG Scale",
              "description": "True Classifier-Free Guidance scale. Higher values increase prompt adherence at the cost of quality.",
              "type": "number"
            },
            "ultralytics": {
              "title": "Ultralytics Features",
              "description": "Configuration object for Ultralytics face enhancement during generation. This feature uses face detection and inpainting to improve facial details in the same generation step, without requiring post-processing.\n\n> [!NOTE]\n> Face enhancement is available for Stable Diffusion 1.X, SDXL, and FLUX models. The system automatically detects faces and applies targeted refinement to improve quality while maintaining consistency with the overall generation.\n\n[Read full documentation](https://runware.ai/docs/models/flux-1-dev-srpo#ultralytics)",
              "type": "object",
              "properties": {
                "CFGScale": {
                  "title": "CFG Scale",
                  "description": "Face refinement guidance scale.",
                  "type": "number",
                  "multipleOf": 0.1,
                  "minimum": 0,
                  "maximum": 50,
                  "default": 8
                },
                "confidence": {
                  "title": "Confidence",
                  "description": "Confidence threshold for detection.",
                  "type": "number",
                  "multipleOf": 0.01,
                  "minimum": 0,
                  "maximum": 1,
                  "default": 0.9
                },
                "maskBlur": {
                  "title": "Mask Blur",
                  "description": "Mask feathering amount. Higher values create softer transitions between the enhanced face region and surrounding areas.",
                  "type": "integer",
                  "minimum": 0,
                  "maximum": 100,
                  "default": 5
                },
                "maskPadding": {
                  "title": "Mask Padding",
                  "description": "Padding around detected face in pixels. Expands the refinement area to include surrounding context like hair and neck.",
                  "type": "integer",
                  "minimum": 0,
                  "maximum": 20,
                  "default": 5
                },
                "negativePrompt": {
                  "title": "Negative Prompt",
                  "description": "Negative prompt for detection.",
                  "type": "string"
                },
                "positivePrompt": {
                  "title": "Positive Prompt",
                  "description": "Positive prompt for detection.",
                  "type": "string"
                },
                "steps": {
                  "title": "Steps",
                  "description": "Number of face refinement steps.",
                  "type": "integer",
                  "minimum": 1,
                  "maximum": 100,
                  "default": 20
                },
                "strength": {
                  "title": "Strength",
                  "description": "Refinement strength. Lower values preserve more of the original, higher values allow more aggressive reconstruction.",
                  "type": "number",
                  "multipleOf": 0.01,
                  "minimum": 0,
                  "maximum": 1,
                  "default": 0.3
                }
              },
              "additionalProperties": false
            },
            "taskType": {
              "title": "Task Type",
              "description": "Identifier for the type of task being performed.",
              "type": "string",
              "const": "imageInference"
            },
            "taskUUID": {
              "title": "Task UUID",
              "description": "UUID v4 identifier for tracking tasks and matching async responses. Must be unique per task.",
              "type": "string",
              "format": "uuid"
            },
            "webhookURL": {
              "title": "Webhook URL",
              "description": "Specifies a webhook URL where JSON responses will be sent via HTTP POST when generation tasks complete. For batch requests with multiple results, each completed item triggers a separate webhook call as it becomes available.",
              "type": "string",
              "format": "uri"
            },
            "includeCost": {
              "title": "Include Cost",
              "description": "Include task cost in the response.",
              "type": "boolean",
              "default": false
            },
            "model": {
              "title": "Model",
              "description": "Identifier of the model to use for generation.",
              "type": "string",
              "const": "runware:111@1"
            },
            "numberResults": {
              "title": "Number of Results",
              "description": "Number of results to generate. Each result uses a different seed, producing variations of the same parameters.",
              "type": "integer",
              "minimum": 1,
              "default": 1,
              "maximum": 20
            },
            "uploadEndpoint": {
              "title": "Upload Endpoint",
              "description": "Specifies a URL where the generated content will be automatically uploaded using the HTTP PUT method. The raw binary data of the media file is sent directly as the request body. For secure uploads to cloud storage, use presigned URLs that include temporary authentication credentials.\n\n**Common use cases:**\n\n- **Cloud storage**: Upload directly to S3 buckets, Google Cloud Storage, or Azure Blob Storage using presigned URLs.\n- **CDN integration**: Upload to content delivery networks for immediate distribution.\n\n```text\n// S3 presigned URL for secure upload\nhttps://your-bucket.s3.amazonaws.com/generated/content.mp4?X-Amz-Signature=abc123&X-Amz-Expires=3600\n\n// Google Cloud Storage presigned URL\nhttps://storage.googleapis.com/your-bucket/content.jpg?X-Goog-Signature=xyz789\n\n// Custom storage endpoint\nhttps://storage.example.com/uploads/generated-image.jpg\n```\n\nThe content data will be sent as the request body to the specified URL when generation is complete.\n\n[Read full documentation](https://runware.ai/docs/models/flux-1-dev-srpo#uploadEndpoint)",
              "type": "string",
              "format": "uri"
            },
            "ttl": {
              "title": "TTL",
              "description": "Time-to-live (TTL) in seconds for generated content. Only applies when `outputType` is `URL`.",
              "type": "integer",
              "minimum": 60
            },
            "outputType": {
              "title": "Output Type",
              "description": "Image output type.",
              "type": "string",
              "enum": [
                "URL",
                "base64Data",
                "dataURI"
              ],
              "default": "URL"
            },
            "outputFormat": {
              "title": "Output Format",
              "description": "Specifies the file format of the generated output. The available values depend on the task type and the specific model's capabilities.\n\n- `JPG`: Best for photorealistic images with smaller file sizes (no transparency).\n- `PNG`: Lossless compression, supports high quality and transparency (alpha channel).\n- `WEBP`: Modern format providing superior compression and transparency support.\n- `MP4`: Widely supported video container (H.264), recommended for general use.\n- `WEBM`: Optimized for web delivery.\n- `MOV`: QuickTime format, common in professional workflows (Apple ecosystem).\n- `GIF`: Animated image format (no audio), suitable for short loops or previews.\n- `MP3`: Compressed audio, smaller file size.\n- `WAV`: Uncompressed, high-quality audio.\n- `FLAC`: Lossless compression.\n- `OGG`: Open-source compressed audio format (Vorbis codec).\n- `SVG`: Scalable Vector Graphics.\n- `TIFF`: High-quality output supporting layers.\n\n> [!NOTE]\n> **Transparency**: If you are using features like background removal or LayerDiffuse that require transparency, you must select a format that supports an alpha channel (e.g., `PNG`, `WEBP`, `TIFF`). `JPG` does not support transparency.\n\n[Read full documentation](https://runware.ai/docs/models/flux-1-dev-srpo#outputFormat)",
              "type": "string",
              "enum": [
                "JPG",
                "PNG",
                "WEBP"
              ],
              "default": "JPG"
            },
            "outputQuality": {
              "title": "Output Quality",
              "description": "Compression quality of the output. Higher values preserve quality but increase file size.",
              "type": "integer",
              "minimum": 20,
              "maximum": 99,
              "default": 95
            },
            "deliveryMethod": {
              "title": "Delivery Method",
              "description": "Determines how the API delivers task results.",
              "type": "string",
              "oneOf": [
                {
                  "const": "sync",
                  "description": "Returns complete results directly in the API response."
                },
                {
                  "const": "async",
                  "description": "Returns an immediate acknowledgment with the task UUID. Poll for results using getResponse."
                }
              ],
              "default": "sync"
            },
            "safety": {
              "type": "object",
              "title": "Safety Settings",
              "description": "Content safety checking configuration for image generation.",
              "properties": {
                "checkContent": {
                  "title": "Check Content",
                  "description": "Enable or disable content safety checking. When enabled, defaults to `fast` mode.",
                  "type": "boolean",
                  "default": false
                },
                "mode": {
                  "description": "Safety checking mode for image generation.",
                  "type": "string",
                  "oneOf": [
                    {
                      "const": "none",
                      "title": "None",
                      "description": "Disables checking."
                    },
                    {
                      "const": "fast",
                      "title": "Fast",
                      "description": "Performs a single check."
                    }
                  ],
                  "default": "none"
                }
              },
              "additionalProperties": false
            }
          }
        },
        "description": "You must always POST an array of task objects."
      },
      "ResponseBody": {
        "type": "object",
        "properties": {
          "data": {
            "type": "array",
            "items": {
              "type": "object",
              "description": "Unknown response structure"
            }
          }
        }
      },
      "ErrorResponse": {
        "title": "Error Response",
        "description": "Standard error response returned by the Runware API.",
        "type": "object",
        "properties": {
          "errors": {
            "type": "array",
            "items": {
              "type": "object",
              "required": [
                "code",
                "message"
              ],
              "additionalProperties": true,
              "properties": {
                "code": {
                  "type": "string",
                  "description": "A short identifier for the error (e.g., invalidApiKey, timeoutProvider)."
                },
                "message": {
                  "type": "string",
                  "description": "A human-readable explanation of what went wrong."
                },
                "parameter": {
                  "type": "string",
                  "description": "The request parameter related to the error, if applicable."
                },
                "taskType": {
                  "type": "string",
                  "description": "The task type of the request that failed."
                },
                "taskUUID": {
                  "type": "string",
                  "description": "The unique identifier of the failed request."
                },
                "documentation": {
                  "type": "string",
                  "description": "A link to relevant documentation."
                }
              }
            }
          }
        },
        "required": [
          "errors"
        ],
        "additionalProperties": false
      }
    }
  },
  "paths": {
    "/": {
      "post": {
        "summary": "Run FLUX.1 [dev] SRPO",
        "description": "FLUX.1 [dev] SRPO is a 12B flow transformer finetuned with Tencent SRPO for higher realism and aesthetics in text guided image generation. It improves lighting, texture, and artifact control. Ideal for teams that need controllable, high quality image output from text prompts.",
        "operationId": "run_flux_1_dev_srpo",
        "requestBody": {
          "required": true,
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/RequestBody"
              }
            }
          }
        },
        "responses": {
          "200": {
            "description": "Successful response",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ResponseBody"
                }
              }
            }
          },
          "400": {
            "description": "Bad Request — Missing or invalid parameters.",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                }
              }
            }
          },
          "401": {
            "description": "Unauthorized — No valid API key provided.",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                }
              }
            }
          },
          "402": {
            "description": "Payment Required — Insufficient account balance.",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                }
              }
            }
          },
          "403": {
            "description": "Forbidden — The API key lacks permissions for this request.",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                }
              }
            }
          },
          "429": {
            "description": "Too Many Requests — Rate limit exceeded.",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                }
              }
            }
          },
          "500": {
            "description": "Server Error — Something went wrong on Runware's end.",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                }
              }
            }
          },
          "503": {
            "description": "Service Unavailable — Temporarily unavailable (maintenance or capacity).",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                }
              }
            }
          }
        }
      }
    }
  },
  "security": [
    {
      "apiKeyAuth": []
    }
  ]
}