{
  "openapi": "3.1.0",
  "info": {
    "title": "Runware API - SD XL v1.0 VAE Fix",
    "summary": "SDXL v1.0 with improved VAE for sharper images",
    "description": "SD XL v1.0 VAE Fix refines the original SDXL image backbone with an improved VAE for cleaner detail and more stable colors. It targets high quality image generation across diverse styles and resolutions. Ideal for developers who need reliable diffusion outputs.",
    "version": "1.0.0",
    "x-model-id": "stabilityai-stable-diffusion-xl-v1-0-vae-fix",
    "x-air-id": "civitai:101055@128078",
    "x-status": "live",
    "x-capabilities": [
      "text-to-image",
      "image-to-image"
    ],
    "x-released-at": "2024-09-17T00:00:00Z",
    "x-cover-image": "https://assets.runware.ai/e544d9bf-6f91-42f8-b07b-7754c7f9c16a.jpg",
    "x-pricing": {
      "overview": "Each image generation costs $0.0019 at 1024x1024.",
      "examples": [
        {
          "configuration": "1024x1024 · 30 steps",
          "price": "$0.0019"
        }
      ]
    }
  },
  "servers": [
    {
      "url": "https://api.runware.ai/v1",
      "description": "Runware REST API"
    }
  ],
  "components": {
    "securitySchemes": {
      "apiKeyAuth": {
        "type": "http",
        "scheme": "bearer",
        "description": "Runware API key sent as a Bearer token in the Authorization header (resulting header: Authorization: Bearer <your-key>)."
      }
    },
    "schemas": {
      "AuthenticationTask": {
        "title": "Authentication",
        "description": "Authenticates a connection using an API key. Can be sent as the first element of the request array as an alternative to using the Authorization header.",
        "type": "object",
        "x-response-schema": "https://schemas.runware.ai/responses/utilities/authentication.json",
        "properties": {
          "taskType": {
            "const": "authentication",
            "title": "Task Type",
            "description": "The type of task to perform."
          },
          "apiKey": {
            "title": "API Key",
            "description": "Your Runware API key.",
            "type": "string"
          },
          "connectionSessionUUID": {
            "title": "Connection Session UUID",
            "description": "Optional session UUID to resume a previous connection and receive any buffered results.",
            "type": "string",
            "format": "uuid"
          }
        },
        "required": [
          "taskType",
          "apiKey"
        ],
        "additionalProperties": false
      },
      "RequestBody": {
        "type": "array",
        "items": {
          "type": "object",
          "properties": {
            "model": {
              "title": "Model",
              "description": "Identifier of the model to use for generation.",
              "type": "string",
              "const": "civitai:101055@128078"
            },
            "width": {
              "title": "Width",
              "description": "Width of the generated media in pixels.",
              "type": "integer",
              "default": 1024
            },
            "height": {
              "title": "Height",
              "description": "Height of the generated media in pixels.",
              "type": "integer",
              "default": 1024
            },
            "steps": {
              "title": "Steps",
              "description": "Total number of denoising steps. Higher values generally produce more detailed results but take longer.",
              "type": "integer",
              "minimum": 1,
              "default": 30
            },
            "inputs": {
              "title": "Inputs",
              "description": "The unified payload wrapper for complex media assets dictating image, video or audio inference constraints.",
              "type": "object",
              "properties": {
                "seedImage": {
                  "title": "Seed Image",
                  "description": "Image used as a starting point for the generation (UUID, URL, Data URI, or Base64).",
                  "type": "string",
                  "anyOf": [
                    {
                      "format": "uuid"
                    },
                    {
                      "format": "uri"
                    },
                    {
                      "pattern": "^data:image\\/[a-zA-Z]+;base64,[a-zA-Z0-9+/=]+$"
                    },
                    {
                      "pattern": "^[a-zA-Z0-9+/=]+$"
                    }
                  ]
                },
                "maskImage": {
                  "title": "Mask Image",
                  "description": "Image used to specify which areas of the seed image should be edited (UUID, URL, Data URI, or Base64).",
                  "type": "string",
                  "anyOf": [
                    {
                      "format": "uuid"
                    },
                    {
                      "format": "uri"
                    },
                    {
                      "pattern": "^data:image\\/[a-zA-Z]+;base64,[a-zA-Z0-9+/=]+$"
                    },
                    {
                      "pattern": "^[a-zA-Z0-9+/=]+$"
                    }
                  ]
                }
              },
              "additionalProperties": false
            },
            "positivePrompt": {
              "title": "Positive Prompt",
              "description": "Text prompt describing elements to include in the generated output.",
              "type": "string",
              "minLength": 2,
              "maxLength": 3000
            },
            "negativePrompt": {
              "title": "Negative Prompt",
              "description": "Prompt to guide what to exclude from generation. Ignored when guidance is disabled (CFGScale ≤ 1).",
              "type": "string",
              "minLength": 2,
              "maxLength": 3000
            },
            "seed": {
              "title": "Seed",
              "description": "Random seed for reproducible generation. When not provided, a random seed is generated in the unsigned 32-bit range.",
              "type": "integer",
              "minimum": 0,
              "maximum": 9223372036854775807
            },
            "scheduler": {
              "title": "Scheduler",
              "description": "Scheduler to use for the diffusion process.",
              "type": "string",
              "enum": [
                "DDIM",
                "DDIMScheduler",
                "DDPMScheduler",
                "DEISMultistepScheduler",
                "Default",
                "DPM++",
                "DPM++ 2M",
                "DPM++ 2M Beta",
                "DPM++ 2M Exponential",
                "DPM++ 2M Karras",
                "DPM++ 2M SDE",
                "DPM++ 2M SDE Beta",
                "DPM++ 2M SDE Exponential",
                "DPM++ 2M SDE Karras",
                "DPM++ 2M SDE Uniform",
                "DPM++ 2M Uniform",
                "DPM++ 3M",
                "DPM++ 3M Beta",
                "DPM++ 3M Exponential",
                "DPM++ 3M Karras",
                "DPM++ 3M SDE Uniform",
                "DPM++ 3M Uniform",
                "DPM++ Beta",
                "DPM++ Exponential",
                "DPM++ Karras",
                "DPM++ SDE",
                "DPM++ SDE Beta",
                "DPM++ SDE Exponential",
                "DPM++ SDE Karras",
                "DPM++ Uniform",
                "DPM++ Uniform Beta",
                "DPM++ Uniform Exponential",
                "DPM++ Uniform Karras",
                "DPMSolverMultistepInverse",
                "DPMSolverMultistepScheduler",
                "DPMSolverSinglestepScheduler",
                "EDMDPMSolverMultistepScheduler",
                "EDMEulerScheduler",
                "Euler",
                "Euler a",
                "Euler Beta",
                "EulerDiscreteScheduler",
                "Euler Exponential",
                "Euler Karras",
                "EulerAncestralDiscreteScheduler",
                "FlowMatchEulerDiscreteScheduler",
                "Heun",
                "HeunDiscreteScheduler",
                "Heun Karras",
                "IPNDMScheduler",
                "IPNDM Uniform",
                "IPNDM Uniform Beta",
                "IPNDM Uniform Exponential",
                "IPNDM Uniform Karras",
                "KDPM2AncestralDiscreteScheduler",
                "KDPM2DiscreteScheduler",
                "LCM",
                "LCMScheduler",
                "LMS",
                "LMSDiscreteScheduler",
                "LMS Karras",
                "PNDMScheduler",
                "TCDScheduler",
                "UniPC",
                "UniPC 2M",
                "UniPC 2M Karras",
                "UniPC 2M Uniform",
                "UniPC 3M",
                "UniPC 3M Karras",
                "UniPC 3M Uniform",
                "UniPC Karras",
                "UniPC Uniform",
                "UniPC Uniform Beta",
                "UniPC Uniform Exponential",
                "UniPC Uniform Karras"
              ]
            },
            "CFGScale": {
              "title": "CFG Scale",
              "description": "Guidance scale representing how closely the output will resemble the prompt. Higher values produce results more aligned with the prompt.",
              "type": "number",
              "multipleOf": 0.01,
              "minimum": 0,
              "maximum": 30
            },
            "strength": {
              "title": "Strength",
              "description": "Strength of the transformation. Lower values result in more influence from the original input.",
              "type": "number",
              "multipleOf": 0.01,
              "minimum": 0,
              "maximum": 1,
              "default": 0.8
            },
            "maskMargin": {
              "title": "Mask Margin",
              "description": "Extra context pixels around the masked region during inpainting. The model zooms into the masked area with these additional pixels for better integration.",
              "type": "integer",
              "minimum": 32,
              "maximum": 128
            },
            "clipSkip": {
              "title": "CLIP Skip",
              "description": "Number of layers to skip in the CLIP model.",
              "type": "integer",
              "minimum": 0,
              "maximum": 4
            },
            "promptWeighting": {
              "title": "Prompt Weighting",
              "description": "Defines the syntax to be used for prompt weighting.\n\nPrompt weighting allows you to adjust how strongly different parts of your prompt influence the generated image. Choose between `compel` notation with advanced weighting operations or `sdEmbeds` for simple emphasis adjustments.\n\n**Compel syntax**:\n\nAdds 0.2 seconds to image inference time and incurs additional costs.\n\nWhen `compel` syntax is selected, you can use the following notation in prompts:\n\n**Weighting**\n\nSyntax: `+` `-` `(word)0.9`\n\nIncrease or decrease the attention given to specific words or phrases.\n\nExamples:\n\n- Single words: `small+ dog, pixar style`\n- Multiple words: `small dog, (pixar style)-`\n- Multiple symbols for more effect: `small+++ dog, pixar style`\n- Nested weighting: `(small+ dog)++, pixar style`\n- Explicit weight percentage: `small dog, (pixar)1.2 style`\n\n**Blend**\n\nSyntax: `.blend()`\n\nMerge multiple conditioning prompts.\n\nExample: `(\"small dog\", \"robot\").blend(1, 0.8)`\n\n**Conjunction**\n\nSyntax: `.and()`\n\nBreak a prompt into multiple clauses and pass them separately.\n\nExample: `(\"small dog\", \"pixar style\").and()`\n\n**Sdembeds syntax**:\n\nWhen `sdEmbeds` syntax is selected, you can use the following notation in prompts:\n\n**Weighting**\n\nSyntax: `(text)` `(text:number)` `[text]`\n\nUse parentheses `()` to increase attention, square brackets `[]` to decrease it. Add a number after the text to specify a custom multiplier.\n\nExamples:\n\n- Single words: `(small) dog, pixar style`\n- Multiple words: `small dog, [pixar style]`\n- Higher emphasis: `(small:2.5) dog, pixar style`\n- Combined emphasis: `(small dog:1.5), pixar style`\n\n[Read full documentation](https://runware.ai/docs/models/stabilityai-stable-diffusion-xl-v1-0-vae-fix#promptWeighting)",
              "type": "string",
              "enum": [
                "compel",
                "sdEmbeds"
              ]
            },
            "acceleratorOptions": {
              "title": "Accelerator Options",
              "description": "Advanced caching mechanisms to speed up generation.",
              "type": "object",
              "properties": {
                "cacheEndStep": {
                  "title": "Cache End Step",
                  "description": "Absolute step number to end caching. Must be greater than `cacheStartStep` and less than or equal to `steps`.",
                  "type": "integer",
                  "minimum": 1
                },
                "cacheEndStepPercentage": {
                  "title": "Cache End Step Percentage",
                  "description": "Percentage of steps to end caching. Alternative to `cacheEndStep`. Must be greater than `cacheStartStepPercentage`.",
                  "type": "integer",
                  "minimum": 1,
                  "maximum": 100
                },
                "cacheMaxConsecutiveSteps": {
                  "title": "Cache Max Consecutive Steps",
                  "description": "Limits the maximum number of consecutive steps that can use cached computations before forcing a fresh computation.",
                  "type": "integer",
                  "minimum": 1,
                  "maximum": 5,
                  "default": 3
                },
                "cacheStartStep": {
                  "title": "Cache Start Step",
                  "description": "Absolute step number to start caching. Must be less than `cacheEndStep`.",
                  "type": "integer",
                  "minimum": 0
                },
                "cacheStartStepPercentage": {
                  "title": "Cache Start Step Percentage",
                  "description": "Percentage of steps to start caching. Alternative to `cacheStartStep`. Must be less than `cacheEndStepPercentage`.",
                  "type": "integer",
                  "minimum": 0,
                  "maximum": 99
                },
                "teaCache": {
                  "title": "TeaCache",
                  "description": "TeaCache acceleration for transformer-based models. Estimates step differences to skip redundant computations.",
                  "type": "boolean",
                  "default": false
                },
                "teaCacheDistance": {
                  "title": "TeaCache Distance",
                  "description": "Controls the aggressiveness of the TeaCache feature. Lower values prioritize quality, higher values prioritize speed.",
                  "type": "number",
                  "multipleOf": 0.01,
                  "minimum": 0,
                  "maximum": 1,
                  "default": 0.5
                },
                "deepCache": {
                  "title": "DeepCache",
                  "description": "DeepCache acceleration. Skips transformer computations in certain steps to speed up generation.",
                  "type": "boolean"
                },
                "deepCacheInterval": {
                  "title": "DeepCache Interval",
                  "description": "Interval for DeepCache acceleration. A value of 2 skips every other step, 3 skips two out of three, etc.",
                  "type": "integer",
                  "minimum": 1
                },
                "deepCacheBranchId": {
                  "title": "DeepCache Branch ID",
                  "description": "Branch ID for DeepCache acceleration. Determines which U-Net layers are skipped.",
                  "type": "integer",
                  "minimum": 0
                }
              },
              "allOf": [
                {
                  "not": {
                    "required": [
                      "cacheStartStep",
                      "cacheStartStepPercentage"
                    ]
                  }
                },
                {
                  "not": {
                    "required": [
                      "cacheEndStep",
                      "cacheEndStepPercentage"
                    ]
                  }
                }
              ],
              "additionalProperties": false
            },
            "outpaint": {
              "title": "Outpaint",
              "description": "Extends image boundaries in specified directions. Final width/height must account for original image plus extensions.",
              "type": "object",
              "properties": {
                "bottom": {
                  "title": "Outpaint Bottom",
                  "description": "Number of pixels to extend to the bottom.",
                  "type": "integer",
                  "minimum": 0
                },
                "left": {
                  "title": "Outpaint Left",
                  "description": "Number of pixels to extend to the left.",
                  "type": "integer",
                  "minimum": 0
                },
                "right": {
                  "title": "Outpaint Right",
                  "description": "Number of pixels to extend to the right.",
                  "type": "integer",
                  "minimum": 0
                },
                "top": {
                  "title": "Outpaint Top",
                  "description": "Number of pixels to extend to the top.",
                  "type": "integer",
                  "minimum": 0
                }
              },
              "additionalProperties": false
            },
            "lora": {
              "title": "LoRA",
              "description": "With LoRA (Low-Rank Adaptation), you can adapt a model to specific styles or features by emphasizing particular aspects of the data. This technique enhances the quality and relevance of generated content and can be especially useful when the output needs to adhere to a specific artistic style or follow particular guidelines.\n\nMultiple LoRA models can be used simultaneously to achieve different adaptation goals.\n\n**Examples**:\n\n```json\n\"lora\": [\n  {\n    \"model\": \"<lora-model-air>\",\n    \"weight\": 0.8\n  }\n]\n```\n\n[Read full documentation](https://runware.ai/docs/models/stabilityai-stable-diffusion-xl-v1-0-vae-fix#lora)",
              "type": "array",
              "minItems": 1,
              "items": {
                "type": "object",
                "properties": {
                  "model": {
                    "title": "LoRA Model",
                    "description": "LoRA model identifier.",
                    "type": "string",
                    "pattern": "^[a-zA-Z0-9._-]+(:[a-zA-Z0-9._/@-]+)?$"
                  },
                  "weight": {
                    "title": "LoRA Weight",
                    "description": "Strength of the LoRA influence. A value of 0 means no influence. Higher values increase the influence, and negative values can be used to steer away from the LoRA's style.",
                    "type": "number",
                    "multipleOf": 0.01,
                    "minimum": -4,
                    "maximum": 4,
                    "default": 1
                  },
                  "transformer": {
                    "title": "LoRA Transformer",
                    "description": "Transformer stages to apply LoRA. Some video models use separate high-noise and low-noise processing stages, and LoRAs can be selectively applied to optimize their effectiveness.",
                    "type": "string",
                    "oneOf": [
                      {
                        "const": "high",
                        "title": "High",
                        "description": "Apply LoRA only to the high-noise processing stage (coarse structure and early generation steps)."
                      },
                      {
                        "const": "low",
                        "title": "Low",
                        "description": "Apply LoRA only to the low-noise processing stage (fine details and later generation steps)."
                      },
                      {
                        "const": "both",
                        "title": "Both",
                        "description": "Apply LoRA to both stages for full coverage."
                      }
                    ],
                    "default": "both"
                  }
                },
                "required": [
                  "model"
                ],
                "additionalProperties": false
              }
            },
            "controlNet": {
              "title": "ControlNet",
              "description": "With ControlNet, you can provide a guide image to help the model generate images that align with the desired structure. This guide image can be generated with our ControlNet preprocessing tool, extracting guidance information from an input image. The guide image can be in the form of an edge map, a pose, a depth estimation or any other type of control image that guides the generation process via the ControlNet model.\n\nMultiple ControlNet models can be used at the same time to provide different types of guidance information to the model.\n\n**Examples**:\n\n```json\n\"controlNet\": [\n  {\n    \"model\": \"<controlnet-model-air>\",\n    \"guideImage\": \"c64351d5-4c59-42f7-95e1-eace013eddab\",\n    \"weight\": 0.7,\n    \"startStep\": 0,\n    \"endStep\": 20,\n    \"controlMode\": \"controlnet\"\n  }\n]\n```\n\n[Read full documentation](https://runware.ai/docs/models/stabilityai-stable-diffusion-xl-v1-0-vae-fix#controlNet)",
              "type": "array",
              "minItems": 1,
              "items": {
                "type": "object",
                "properties": {
                  "model": {
                    "title": "ControlNet Model",
                    "description": "ControlNet model identifier.",
                    "type": "string",
                    "pattern": "^[a-zA-Z0-9._-]+(:[a-zA-Z0-9._/@-]+)?$"
                  },
                  "weight": {
                    "title": "ControlNet Weight",
                    "description": "Strength of the ControlNet influence. A value of 0 means no influence. Higher values increase the influence, and negative values can be used to steer away from the guide image.",
                    "type": "number",
                    "multipleOf": 0.01,
                    "minimum": -4,
                    "maximum": 4,
                    "default": 1
                  },
                  "guideImage": {
                    "title": "Guide Image",
                    "description": "Reference image for ControlNet guidance (UUID, URL, Data URI, or Base64).",
                    "type": "string",
                    "anyOf": [
                      {
                        "format": "uuid"
                      },
                      {
                        "format": "uri"
                      },
                      {
                        "pattern": "^data:image\\/[a-zA-Z]+;base64,[a-zA-Z0-9+/=]+$"
                      },
                      {
                        "pattern": "^[a-zA-Z0-9+/=]+$"
                      }
                    ]
                  },
                  "controlMode": {
                    "title": "Control Mode",
                    "description": "ControlNet guidance mode.",
                    "type": "string",
                    "default": "balanced",
                    "oneOf": [
                      {
                        "const": "balanced",
                        "description": "Equal weight between ControlNet and prompt."
                      },
                      {
                        "const": "controlnet",
                        "description": "Prioritize ControlNet guidance."
                      },
                      {
                        "const": "prompt",
                        "description": "Prioritize prompt guidance."
                      }
                    ]
                  },
                  "endStep": {
                    "title": "ControlNet End Step",
                    "description": "Absolute step number to end ControlNet influence. Must be greater than `startStep` and less than or equal to `steps`.",
                    "type": "integer",
                    "minimum": 1
                  },
                  "endStepPercentage": {
                    "title": "End Step Percentage",
                    "description": "Percentage of steps to end ControlNet influence. Must be greater than `startStepPercentage`.",
                    "type": "integer",
                    "minimum": 1,
                    "maximum": 100
                  },
                  "startStep": {
                    "title": "Start Step",
                    "description": "Absolute step number to start ControlNet influence. Must be less than `endStep`.",
                    "type": "integer",
                    "minimum": 0
                  },
                  "startStepPercentage": {
                    "title": "Start Step Percentage",
                    "description": "Percentage of steps to start ControlNet influence. Must be less than `endStepPercentage`.",
                    "type": "integer",
                    "minimum": 0,
                    "maximum": 99
                  }
                },
                "required": [
                  "model",
                  "guideImage"
                ],
                "allOf": [
                  {
                    "not": {
                      "required": [
                        "startStep",
                        "startStepPercentage"
                      ]
                    }
                  },
                  {
                    "not": {
                      "required": [
                        "endStep",
                        "endStepPercentage"
                      ]
                    }
                  }
                ],
                "additionalProperties": false
              }
            },
            "ipAdapters": {
              "title": "IP Adapters",
              "description": "IP-Adapters enable image-prompted generation, allowing you to use reference images to guide the style and content of your generations. Multiple IP Adapters can be used simultaneously.\n\n**Examples**:\n\n```json\n\"ipAdapters\": [\n  {\n    \"model\": \"<ip-adapter-model-air>\",\n    \"guideImages\": [\"c64351d5-4c59-42f7-95e1-eace013eddab\"],\n    \"weight\": 0.75\n  },\n  {\n    \"model\": \"<ip-adapter-model-air>\",\n    \"guideImages\": [\"d7e8f9a0-2b5c-4e7f-a1d3-9c8b7a6e5d4f\"],\n    \"weight\": 0.5\n  }\n]\n```\n\n[Read full documentation](https://runware.ai/docs/models/stabilityai-stable-diffusion-xl-v1-0-vae-fix#ipAdapters)",
              "type": "array",
              "minItems": 1,
              "items": {
                "type": "object",
                "properties": {
                  "model": {
                    "title": "IP-Adapter Model",
                    "description": "We make use of the [AIR system](https://runware.ai/models) to identify IP-Adapter models. This identifier is a unique string that represents a specific model.\n\n**Supported models list**:\n\n| AIR ID | Model Name |\n| --- | --- |\n| runware:55@1 | IP Adapter SDXL |\n| runware:55@2 | IP Adapter SDXL Plus |\n| runware:55@3 | IP Adapter SDXL Plus Face |\n| runware:55@4 | IP Adapter SDXL Vit-H |\n| runware:55@5 | IP Adapter SD 1.5 |\n| runware:55@6 | IP Adapter SD 1.5 Plus |\n| runware:55@7 | IP Adapter SD 1.5 Light |\n| runware:55@8 | IP Adapter SD 1.5 Plus Face |\n| runware:55@10 | IP Adapter SD 1.5 Vit-G |\n\n[Read full documentation](https://runware.ai/docs/models/stabilityai-stable-diffusion-xl-v1-0-vae-fix#model)",
                    "type": "string",
                    "pattern": "^[a-zA-Z0-9._-]+(:[a-zA-Z0-9._/@-]+)?$"
                  },
                  "weight": {
                    "title": "IP-Adapter Weight",
                    "description": "Strength of the IP-Adapter influence. A value of 0 means no influence. Higher values increase the influence, and negative values can be used to steer away from the reference.",
                    "type": "number",
                    "multipleOf": 0.01,
                    "minimum": -4,
                    "maximum": 4,
                    "default": 1
                  },
                  "guideImages": {
                    "title": "Guide Images",
                    "description": "Images to guide the IP-Adapter (UUID, URL, Data URI, or Base64).",
                    "type": "array",
                    "minItems": 1,
                    "items": {
                      "title": "Image",
                      "description": "Image input (UUID, URL, Data URI, or Base64).",
                      "type": "string",
                      "anyOf": [
                        {
                          "format": "uuid"
                        },
                        {
                          "format": "uri"
                        },
                        {
                          "pattern": "^data:image\\/[a-zA-Z]+;base64,[a-zA-Z0-9+/=]+$"
                        },
                        {
                          "pattern": "^[a-zA-Z0-9+/=]+$"
                        }
                      ]
                    }
                  },
                  "combineMethod": {
                    "title": "Combine Method",
                    "description": "Controls how multiple reference images are combined.",
                    "type": "string",
                    "enum": [
                      "concat",
                      "add",
                      "subtract",
                      "average",
                      "norm_average"
                    ],
                    "default": "concat"
                  },
                  "embedScaling": {
                    "title": "Embed Scaling",
                    "description": "Determines which embedding components are used and their strength.",
                    "type": "string",
                    "enum": [
                      "only_v",
                      "kv",
                      "kv_penalty_c",
                      "k_mean_v_penalty_c"
                    ],
                    "default": "kv"
                  },
                  "weightType": {
                    "title": "Weight Type",
                    "description": "Shapes how influence evolves during generation.",
                    "type": "string",
                    "enum": [
                      "normal",
                      "ease_in",
                      "ease_out",
                      "ease_in_out",
                      "weak_input",
                      "weak_output",
                      "weak_middle",
                      "strong_middle",
                      "style_transfer",
                      "composition",
                      "strong_style_transfer",
                      "style_and_composition",
                      "strong_style_and_composition"
                    ],
                    "default": "normal"
                  },
                  "weightComposition": {
                    "title": "Weight Composition",
                    "description": "Controls composition/layout influence specifically.",
                    "type": "number",
                    "multipleOf": 0.01,
                    "minimum": 0,
                    "maximum": 1
                  }
                },
                "required": [
                  "model",
                  "guideImages"
                ],
                "additionalProperties": false
              }
            },
            "hiresFix": {
              "title": "HiresFix",
              "description": "Two-stage generation for improved resolution and detail. The model generates at a lower resolution first, then upscales and refines the result in a second pass. Can be enabled with `true` for default settings, or configured as an object for fine-grained control over the upscaling model, steps, and strength.\n\nWhen using the object form, the `model` parameter is required. Available upscaling models:\n\n| Model | Name | Upscale Factor |\n| --- | --- | --- |\n| `runware:504@1` | RealESRGAN\\_x4plus | 4x |\n| `runware:realesrgan@anime-6b` | RealESRGAN\\_x4plus\\_anime\\_6B | 4x |\n| `runware:esrgan@animesharp` | 4x-AnimeSharp | 4x |\n| `runware:esrgan@ultrasharp` | 4x-UltraSharp | 4x |\n\n**Simple (boolean)**:\n\n```json\n\"hiresFix\": true\n```\n\n**Configured (object)**:\n\n```json\n\"hiresFix\": {\n  \"model\": \"runware:esrgan@ultrasharp\",\n  \"steps\": 15,\n  \"strength\": 0.6\n}\n```\n\n[Read full documentation](https://runware.ai/docs/models/stabilityai-stable-diffusion-xl-v1-0-vae-fix#hiresFix)",
              "oneOf": [
                {
                  "type": "boolean",
                  "const": true
                },
                {
                  "type": "object",
                  "properties": {
                    "model": {
                      "title": "Model",
                      "description": "The upscaling model to use for hires fix.",
                      "type": "string",
                      "pattern": "^[a-zA-Z0-9._-]+(:[a-zA-Z0-9._/@-]+)?$",
                      "enum": [
                        "runware:realesrgan@anime-6b",
                        "runware:esrgan@animesharp",
                        "runware:esrgan@ultrasharp",
                        "runware:504@1"
                      ]
                    },
                    "steps": {
                      "title": "Steps",
                      "description": "Total number of denoising steps. Higher values generally produce more detailed results but take longer.",
                      "type": "integer",
                      "minimum": 1,
                      "maximum": 35,
                      "default": 10
                    },
                    "strength": {
                      "title": "Strength",
                      "description": "Strength of the transformation. Lower values result in more influence from the original input.",
                      "type": "number",
                      "multipleOf": 0.01,
                      "minimum": 0,
                      "maximum": 1,
                      "default": 0.8
                    },
                    "upscaleFactor": {
                      "title": "Upscale Factor",
                      "description": "Factor by which to upscale the generated image. A value of 4 multiplies both width and height by 4 (the only factor supported by the available models).",
                      "type": "integer",
                      "enum": [
                        4
                      ],
                      "default": 4
                    }
                  },
                  "required": [
                    "model"
                  ],
                  "additionalProperties": false
                }
              ]
            },
            "photoMaker": {
              "title": "PhotoMaker",
              "description": "PhotoMaker enables personalized image generation while preserving the identity of a subject from reference images. Provide one or more photos of a person, and the model will generate new images that maintain their facial characteristics, expression tendencies, and overall identity across different styles and compositions.\n\n> [!NOTE]\n> Reference images must each contain a single, clear face. Up to 4 reference images can be provided for stronger identity preservation.\n\n**Examples**:\n\n```json\n\"photoMaker\": {\n  \"images\": [\"59a2edc2-45e6-429f-be5f-7ded59b92046\"],\n  \"strength\": 20,\n  \"style\": \"Cinematic\"\n}\n```\n\n[Read full documentation](https://runware.ai/docs/models/stabilityai-stable-diffusion-xl-v1-0-vae-fix#photoMaker)",
              "type": "object",
              "properties": {
                "images": {
                  "title": "Images",
                  "description": "Reference images for subject identity preservation. Each image must contain a single, clear face of the subject (UUID, URL, Data URI, or Base64).",
                  "type": "array",
                  "minItems": 1,
                  "maxItems": 4,
                  "items": {
                    "title": "Image",
                    "description": "Image input (UUID, URL, Data URI, or Base64).",
                    "type": "string",
                    "anyOf": [
                      {
                        "format": "uuid"
                      },
                      {
                        "format": "uri"
                      },
                      {
                        "pattern": "^data:image\\/[a-zA-Z]+;base64,[a-zA-Z0-9+/=]+$"
                      },
                      {
                        "pattern": "^[a-zA-Z0-9+/=]+$"
                      }
                    ]
                  }
                },
                "strength": {
                  "title": "Strength",
                  "description": "Controls the balance between preserving the subject's original features and the creative transformation specified in the prompt. Lower values provide stronger subject fidelity, higher values allow more creative freedom.",
                  "type": "integer",
                  "minimum": 15,
                  "maximum": 50,
                  "default": 15
                },
                "style": {
                  "title": "Style",
                  "description": "Artistic style applied to the generated images.",
                  "type": "string",
                  "default": "No style",
                  "oneOf": [
                    {
                      "const": "No style",
                      "description": "Maximizes subject fidelity while allowing creative freedom in the composition."
                    },
                    {
                      "const": "Cinematic",
                      "description": "Applies a movie-like aesthetic."
                    },
                    {
                      "const": "Disney Character",
                      "description": "Transforms the subject into a Disney-inspired character."
                    },
                    {
                      "const": "Digital Art",
                      "description": "Creates a digital artwork style."
                    },
                    {
                      "const": "Photographic",
                      "description": "Enhances photographic qualities."
                    },
                    {
                      "const": "Fantasy art",
                      "description": "Applies fantasy-themed artistic elements."
                    },
                    {
                      "const": "Neonpunk",
                      "description": "Creates a neon-colored cyberpunk aesthetic."
                    },
                    {
                      "const": "Enhance",
                      "description": "Improves overall image quality."
                    },
                    {
                      "const": "Comic book",
                      "description": "Transforms the subject into comic book style."
                    },
                    {
                      "const": "Lowpoly",
                      "description": "Creates a low-polygon geometric style."
                    },
                    {
                      "const": "Line art",
                      "description": "Converts the image into line drawing style."
                    }
                  ]
                }
              },
              "required": [
                "images"
              ],
              "additionalProperties": false
            },
            "ultralytics": {
              "title": "Ultralytics Features",
              "description": "Configuration object for Ultralytics face enhancement during generation. This feature uses face detection and inpainting to improve facial details in the same generation step, without requiring post-processing.\n\n> [!NOTE]\n> Face enhancement is available for Stable Diffusion 1.X, SDXL, and FLUX models. The system automatically detects faces and applies targeted refinement to improve quality while maintaining consistency with the overall generation.\n\n[Read full documentation](https://runware.ai/docs/models/stabilityai-stable-diffusion-xl-v1-0-vae-fix#ultralytics)",
              "type": "object",
              "properties": {
                "CFGScale": {
                  "title": "CFG Scale",
                  "description": "Face refinement guidance scale.",
                  "type": "number",
                  "multipleOf": 0.1,
                  "minimum": 0,
                  "maximum": 50,
                  "default": 8
                },
                "confidence": {
                  "title": "Confidence",
                  "description": "Confidence threshold for detection.",
                  "type": "number",
                  "multipleOf": 0.01,
                  "minimum": 0,
                  "maximum": 1,
                  "default": 0.9
                },
                "maskBlur": {
                  "title": "Mask Blur",
                  "description": "Mask feathering amount. Higher values create softer transitions between the enhanced face region and surrounding areas.",
                  "type": "integer",
                  "minimum": 0,
                  "maximum": 100,
                  "default": 5
                },
                "maskPadding": {
                  "title": "Mask Padding",
                  "description": "Padding around detected face in pixels. Expands the refinement area to include surrounding context like hair and neck.",
                  "type": "integer",
                  "minimum": 0,
                  "maximum": 20,
                  "default": 5
                },
                "negativePrompt": {
                  "title": "Negative Prompt",
                  "description": "Negative prompt for detection.",
                  "type": "string"
                },
                "positivePrompt": {
                  "title": "Positive Prompt",
                  "description": "Positive prompt for detection.",
                  "type": "string"
                },
                "steps": {
                  "title": "Steps",
                  "description": "Number of face refinement steps.",
                  "type": "integer",
                  "minimum": 1,
                  "maximum": 100,
                  "default": 20
                },
                "strength": {
                  "title": "Strength",
                  "description": "Refinement strength. Lower values preserve more of the original, higher values allow more aggressive reconstruction.",
                  "type": "number",
                  "multipleOf": 0.01,
                  "minimum": 0,
                  "maximum": 1,
                  "default": 0.3
                }
              },
              "additionalProperties": false
            },
            "taskType": {
              "title": "Task Type",
              "description": "Identifier for the type of task being performed.",
              "type": "string",
              "const": "imageInference"
            },
            "taskUUID": {
              "title": "Task UUID",
              "description": "UUID v4 identifier for tracking tasks and matching async responses. Must be unique per task.",
              "type": "string",
              "format": "uuid"
            },
            "webhookURL": {
              "title": "Webhook URL",
              "description": "Specifies a webhook URL where JSON responses will be sent via HTTP POST when generation tasks complete. For batch requests with multiple results, each completed item triggers a separate webhook call as it becomes available.",
              "type": "string",
              "format": "uri"
            },
            "includeCost": {
              "title": "Include Cost",
              "description": "Include task cost in the response.",
              "type": "boolean",
              "default": false
            },
            "numberResults": {
              "title": "Number of Results",
              "description": "Number of results to generate. Each result uses a different seed, producing variations of the same parameters.",
              "type": "integer",
              "minimum": 1,
              "default": 1,
              "maximum": 20
            },
            "uploadEndpoint": {
              "title": "Upload Endpoint",
              "description": "Specifies a URL where the generated content will be automatically uploaded using the HTTP PUT method. The raw binary data of the media file is sent directly as the request body. For secure uploads to cloud storage, use presigned URLs that include temporary authentication credentials.\n\n**Common use cases:**\n\n- **Cloud storage**: Upload directly to S3 buckets, Google Cloud Storage, or Azure Blob Storage using presigned URLs.\n- **CDN integration**: Upload to content delivery networks for immediate distribution.\n\n```text\n// S3 presigned URL for secure upload\nhttps://your-bucket.s3.amazonaws.com/generated/content.mp4?X-Amz-Signature=abc123&X-Amz-Expires=3600\n\n// Google Cloud Storage presigned URL\nhttps://storage.googleapis.com/your-bucket/content.jpg?X-Goog-Signature=xyz789\n\n// Custom storage endpoint\nhttps://storage.example.com/uploads/generated-image.jpg\n```\n\nThe content data will be sent as the request body to the specified URL when generation is complete.\n\n[Read full documentation](https://runware.ai/docs/models/stabilityai-stable-diffusion-xl-v1-0-vae-fix#uploadEndpoint)",
              "type": "string",
              "format": "uri"
            },
            "ttl": {
              "title": "TTL",
              "description": "Time-to-live (TTL) in seconds for generated content. Only applies when `outputType` is `URL`.",
              "type": "integer",
              "minimum": 60
            },
            "outputType": {
              "title": "Output Type",
              "description": "Image output type.",
              "type": "string",
              "enum": [
                "URL",
                "base64Data",
                "dataURI"
              ],
              "default": "URL"
            },
            "outputFormat": {
              "title": "Output Format",
              "description": "Specifies the file format of the generated output. The available values depend on the task type and the specific model's capabilities.\n\n- \\`JPG\\`: Best for photorealistic images with smaller file sizes (no transparency).\n- \\`PNG\\`: Lossless compression, supports high quality and transparency (alpha channel).\n- \\`WEBP\\`: Modern format providing superior compression and transparency support.\n- \\`MP4\\`: Widely supported video container (H.264), recommended for general use.\n- \\`WEBM\\`: Optimized for web delivery.\n- \\`MOV\\`: QuickTime format, common in professional workflows (Apple ecosystem).\n- \\`GIF\\`: Animated image format (no audio), suitable for short loops or previews.\n- \\`MP3\\`: Compressed audio, smaller file size.\n- \\`WAV\\`: Uncompressed, high-quality audio.\n- \\`FLAC\\`: Lossless compression.\n- \\`OGG\\`: Open-source compressed audio format (Vorbis codec).\n- \\`SVG\\`: Scalable Vector Graphics.\n- \\`TIFF\\`: High-quality output supporting layers.\n\n> [!NOTE]\n> \\*\\*Transparency\\*\\*: If you are using features like background removal or LayerDiffuse that require transparency, you must select a format that supports an alpha channel (e.g., \\`PNG\\`, \\`WEBP\\`, \\`TIFF\\`). \\`JPG\\` does not support transparency.\n\n[Read full documentation](https://runware.ai/docs/models/stabilityai-stable-diffusion-xl-v1-0-vae-fix#outputFormat)",
              "type": "string",
              "enum": [
                "JPG",
                "PNG",
                "WEBP"
              ],
              "default": "JPG"
            },
            "outputQuality": {
              "title": "Output Quality",
              "description": "Compression quality of the output. Higher values preserve quality but increase file size.",
              "type": "integer",
              "minimum": 20,
              "maximum": 99,
              "default": 95
            },
            "deliveryMethod": {
              "title": "Delivery Method",
              "description": "Determines how the API delivers task results.",
              "type": "string",
              "oneOf": [
                {
                  "const": "sync",
                  "description": "Returns complete results directly in the API response."
                },
                {
                  "const": "async",
                  "description": "Returns an immediate acknowledgment with the task UUID. Poll for results using getResponse."
                }
              ],
              "default": "sync"
            },
            "safety": {
              "type": "object",
              "title": "Safety Settings",
              "description": "Content safety checking configuration for image generation.",
              "properties": {
                "checkContent": {
                  "title": "Check Content",
                  "description": "Enable or disable content safety checking. When enabled, defaults to `fast` mode.",
                  "type": "boolean",
                  "default": false
                },
                "mode": {
                  "description": "Safety checking mode for image generation. When `checkContent` is enabled and no mode is specified, `fast` is used.",
                  "type": "string",
                  "oneOf": [
                    {
                      "const": "none",
                      "title": "None",
                      "description": "Disables checking."
                    },
                    {
                      "const": "fast",
                      "title": "Fast",
                      "description": "Performs a single check."
                    }
                  ],
                  "default": "none"
                }
              },
              "additionalProperties": false
            }
          },
          "allOf": [
            {
              "dependentRequired": {
                "width": [
                  "height"
                ],
                "height": [
                  "width"
                ]
              }
            }
          ],
          "additionalProperties": false,
          "required": [
            "positivePrompt",
            "taskType",
            "taskUUID",
            "model"
          ]
        },
        "description": "You must always POST an array of task objects."
      },
      "ResponseBody": {
        "type": "object",
        "properties": {
          "data": {
            "type": "array",
            "items": {
              "type": "object",
              "description": "Unknown response structure"
            }
          }
        }
      },
      "ErrorResponse": {
        "title": "Error Response",
        "description": "Standard error response returned by the Runware API.",
        "type": "object",
        "properties": {
          "errors": {
            "type": "array",
            "items": {
              "type": "object",
              "required": [
                "code",
                "message"
              ],
              "additionalProperties": true,
              "properties": {
                "code": {
                  "type": "string",
                  "description": "A short identifier for the error (e.g., invalidApiKey, timeoutProvider)."
                },
                "message": {
                  "type": "string",
                  "description": "A human-readable explanation of what went wrong."
                },
                "parameter": {
                  "type": "string",
                  "description": "The request parameter related to the error, if applicable."
                },
                "taskType": {
                  "type": "string",
                  "description": "The task type of the request that failed."
                },
                "taskUUID": {
                  "type": "string",
                  "description": "The unique identifier of the failed request."
                },
                "documentation": {
                  "type": "string",
                  "description": "A link to relevant documentation."
                }
              }
            }
          }
        },
        "required": [
          "errors"
        ],
        "additionalProperties": false
      }
    }
  },
  "paths": {
    "/": {
      "post": {
        "summary": "Run SD XL v1.0 VAE Fix",
        "description": "SD XL v1.0 VAE Fix refines the original SDXL image backbone with an improved VAE for cleaner detail and more stable colors. It targets high quality image generation across diverse styles and resolutions. Ideal for developers who need reliable diffusion outputs.",
        "operationId": "run_stabilityai_stable_diffusion_xl_v1_0_vae_fix",
        "requestBody": {
          "required": true,
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/RequestBody"
              }
            }
          }
        },
        "responses": {
          "200": {
            "description": "Successful response",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ResponseBody"
                }
              }
            }
          },
          "400": {
            "description": "Bad Request — Missing or invalid parameters.",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                }
              }
            }
          },
          "401": {
            "description": "Unauthorized — No valid API key provided.",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                }
              }
            }
          },
          "402": {
            "description": "Payment Required — Insufficient account balance.",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                }
              }
            }
          },
          "403": {
            "description": "Forbidden — The API key lacks permissions for this request.",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                }
              }
            }
          },
          "429": {
            "description": "Too Many Requests — Rate limit exceeded.",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                }
              }
            }
          },
          "500": {
            "description": "Server Error — Something went wrong on Runware's end.",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                }
              }
            }
          },
          "503": {
            "description": "Service Unavailable — Temporarily unavailable (maintenance or capacity).",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                }
              }
            }
          }
        }
      }
    }
  },
  "security": [
    {
      "apiKeyAuth": []
    }
  ]
}