{
  "openapi": "3.1.0",
  "info": {
    "title": "Runware API - alibaba-z-image-turbo",
    "version": "1.0.0",
    "description": "Z-Image-Turbo is a distilled vision model for sub-second image generation. It produces sharp photorealistic results and supports accurate Chinese text and English text inside images. It follows complex layout instructions with stable structure for UI, posters, and scenes."
  },
  "servers": [
    {
      "url": "https://api.runware.ai/v1",
      "description": "Runware REST API"
    }
  ],
  "components": {
    "securitySchemes": {
      "apiKeyAuth": {
        "type": "http",
        "scheme": "bearer",
        "description": "Runware API Key (e.g., Bearer <your-key>)"
      }
    },
    "schemas": {
      "AuthenticationTask": {
        "title": "Authentication",
        "description": "Authenticates a connection using an API key. Can be sent as the first element of the request array as an alternative to using the Authorization header.",
        "type": "object",
        "x-response-schema": "https://schemas.runware.ai/responses/utilities/authentication.json",
        "properties": {
          "taskType": {
            "const": "authentication",
            "title": "Task Type",
            "description": "The type of task to perform."
          },
          "apiKey": {
            "title": "API Key",
            "description": "Your Runware API key.",
            "type": "string"
          },
          "connectionSessionUUID": {
            "title": "Connection Session UUID",
            "description": "Optional session UUID to resume a previous connection and receive any buffered results.",
            "type": "string",
            "format": "uuid"
          }
        },
        "required": [
          "taskType",
          "apiKey"
        ],
        "additionalProperties": false
      },
      "RequestBody": {
        "type": "array",
        "items": {
          "type": "object",
          "required": [
            "positivePrompt",
            "taskType",
            "taskUUID",
            "model"
          ],
          "allOf": [
            {
              "dependentRequired": {
                "width": [
                  "height"
                ],
                "height": [
                  "width"
                ]
              }
            }
          ],
          "additionalProperties": false,
          "properties": {
            "taskType": {
              "title": "Task Type",
              "description": "Identifier for the type of task being performed",
              "type": "string",
              "const": "imageInference"
            },
            "taskUUID": {
              "title": "Task UUID",
              "description": "UUID v4 identifier for tracking tasks and matching async responses. Must be unique per task.",
              "type": "string",
              "format": "uuid"
            },
            "webhookURL": {
              "title": "Webhook URL",
              "description": "Specifies a webhook URL where JSON responses will be sent via HTTP POST when generation tasks complete. For batch requests with multiple results, each completed item triggers a separate webhook call as it becomes available.",
              "type": "string",
              "format": "uri"
            },
            "includeCost": {
              "title": "Include Cost",
              "description": "Include task cost in the response.",
              "type": "boolean",
              "default": false
            },
            "model": {
              "title": "Model",
              "description": "Identifier of the model to use for generation.",
              "type": "string",
              "const": "runware:z-image@turbo"
            },
            "numberResults": {
              "title": "Number of Results",
              "description": "Number of results to generate. Each result uses a different seed, producing variations of the same parameters.",
              "type": "integer",
              "minimum": 1,
              "maximum": 20,
              "default": 1
            },
            "uploadEndpoint": {
              "title": "Upload Endpoint",
              "description": "Specifies a URL where the generated content will be automatically uploaded using the HTTP PUT method. The raw binary data of the media file is sent directly as the request body. For secure uploads to cloud storage, use presigned URLs that include temporary authentication credentials.\n\n**Common use cases:**\n\n- **Cloud storage**: Upload directly to S3 buckets, Google Cloud Storage, or Azure Blob Storage using presigned URLs.\n- **CDN integration**: Upload to content delivery networks for immediate distribution.\n\n```text\n// S3 presigned URL for secure upload\nhttps://your-bucket.s3.amazonaws.com/generated/content.mp4?X-Amz-Signature=abc123&X-Amz-Expires=3600\n\n// Google Cloud Storage presigned URL\nhttps://storage.googleapis.com/your-bucket/content.jpg?X-Goog-Signature=xyz789\n\n// Custom storage endpoint\nhttps://storage.example.com/uploads/generated-image.jpg\n```\n\nThe content data will be sent as the request body to the specified URL when generation is complete.\n\n[Read full documentation](https://runware.ai/docs/models/alibaba-z-image-turbo#uploadEndpoint)",
              "type": "string",
              "format": "uri"
            },
            "ttl": {
              "title": "TTL",
              "description": "Time-to-live (TTL) in seconds for generated content. Only applies when `outputType` is `URL`.",
              "type": "integer",
              "minimum": 60
            },
            "outputType": {
              "title": "Output Type",
              "description": "Image output type.",
              "type": "string",
              "enum": [
                "URL",
                "base64Data",
                "dataURI"
              ],
              "default": "URL"
            },
            "outputFormat": {
              "title": "Output Format",
              "description": "Specifies the file format of the generated output. The available values depend on the task type and the specific model's capabilities.\n\n- `JPG`: Best for photorealistic images with smaller file sizes (no transparency).\n- `PNG`: Lossless compression, supports high quality and transparency (alpha channel).\n- `WEBP`: Modern format providing superior compression and transparency support.\n- `MP4`: Widely supported video container (H.264), recommended for general use.\n- `WEBM`: Optimized for web delivery.\n- `MOV`: QuickTime format, common in professional workflows (Apple ecosystem).\n- `GIF`: Animated image format (no audio), suitable for short loops or previews.\n- `MP3`: Compressed audio, smaller file size.\n- `WAV`: Uncompressed, high-quality audio.\n- `FLAC`: Lossless compression.\n- `SVG`: Scalable Vector Graphics.\n- `TIFF`: High-quality output supporting layers.\n\n> [!NOTE]\n> **Transparency**: If you are using features like background removal or LayerDiffuse that require transparency, you must select a format that supports an alpha channel (e.g., `PNG`, `WEBP`, `TIFF`). `JPG` does not support transparency.\n\n[Read full documentation](https://runware.ai/docs/models/alibaba-z-image-turbo#outputFormat)",
              "type": "string",
              "enum": [
                "JPG",
                "PNG",
                "WEBP"
              ],
              "default": "JPG"
            },
            "outputQuality": {
              "title": "Output Quality",
              "description": "Compression quality of the output. Higher values preserve quality but increase file size.",
              "type": "integer",
              "minimum": 20,
              "maximum": 99,
              "default": 95
            },
            "deliveryMethod": {
              "title": "Delivery Method",
              "description": "Determines how the API delivers task results.",
              "type": "string",
              "oneOf": [
                {
                  "const": "sync",
                  "description": "Returns complete results directly in the API response."
                },
                {
                  "const": "async",
                  "description": "Returns an immediate acknowledgment with the task UUID. Poll for results using getResponse."
                }
              ],
              "default": "sync"
            },
            "safety": {
              "type": "object",
              "title": "Safety Settings",
              "description": "Content safety checking configuration for image generation.",
              "properties": {
                "checkContent": {
                  "title": "Check Content",
                  "description": "Enable or disable content safety checking. When enabled, defaults to `fast` mode.",
                  "type": "boolean",
                  "default": false
                },
                "mode": {
                  "description": "Safety checking mode for image generation.",
                  "type": "string",
                  "oneOf": [
                    {
                      "const": "none",
                      "title": "None",
                      "description": "Disables checking."
                    },
                    {
                      "const": "fast",
                      "title": "Fast",
                      "description": "Performs a single check."
                    }
                  ],
                  "default": "none"
                }
              },
              "additionalProperties": false
            },
            "inputs": {
              "title": "Inputs",
              "description": "The unified payload wrapper for complex media assets dictating image, video or audio inference constraints.",
              "type": "object",
              "properties": {
                "seedImage": {
                  "title": "Seed Image",
                  "description": "Image used as a starting point for the generation (UUID, URL, Data URI, or Base64).",
                  "type": "string",
                  "anyOf": [
                    {
                      "format": "uuid"
                    },
                    {
                      "format": "uri"
                    },
                    {
                      "pattern": "^data:image\\/[a-zA-Z]+;base64,[a-zA-Z0-9+/=]+$"
                    },
                    {
                      "pattern": "^[a-zA-Z0-9+/=]+$"
                    }
                  ]
                },
                "maskImage": {
                  "title": "Mask Image",
                  "description": "Image used to specify which areas of the seed image should be edited (UUID, URL, Data URI, or Base64).",
                  "type": "string",
                  "anyOf": [
                    {
                      "format": "uuid"
                    },
                    {
                      "format": "uri"
                    },
                    {
                      "pattern": "^data:image\\/[a-zA-Z]+;base64,[a-zA-Z0-9+/=]+$"
                    },
                    {
                      "pattern": "^[a-zA-Z0-9+/=]+$"
                    }
                  ]
                }
              },
              "additionalProperties": false
            },
            "positivePrompt": {
              "title": "Positive Prompt",
              "description": "Text prompt describing elements to include in the generated output.",
              "type": "string",
              "minLength": 2,
              "maxLength": 3000
            },
            "negativePrompt": {
              "title": "Negative Prompt",
              "description": "Prompt to guide what to exclude from generation. Ignored when guidance is disabled (CFGScale ≤ 1).",
              "type": "string",
              "minLength": 2,
              "maxLength": 3000
            },
            "width": {
              "title": "Width",
              "description": "Width of the generated media in pixels.",
              "type": "integer",
              "minimum": 128,
              "maximum": 2048,
              "multipleOf": 16
            },
            "height": {
              "title": "Height",
              "description": "Height of the generated media in pixels.",
              "type": "integer",
              "minimum": 128,
              "maximum": 2048,
              "multipleOf": 16
            },
            "seed": {
              "title": "Seed",
              "description": "Random seed for reproducible generation. When not provided, a random seed is generated in the unsigned 32-bit range.",
              "type": "integer",
              "minimum": 0,
              "maximum": 9223372036854775807
            },
            "steps": {
              "title": "Steps",
              "description": "Total number of denoising steps. Higher values generally produce more detailed results but take longer.",
              "type": "integer",
              "minimum": 1,
              "maximum": 50
            },
            "scheduler": {
              "title": "Scheduler",
              "description": "Scheduler to use for the diffusion process.",
              "type": "string",
              "enum": [
                "DDIM",
                "DDIMScheduler",
                "DDPMScheduler",
                "DEISMultistepScheduler",
                "Default",
                "DPM++",
                "DPM++ 2M",
                "DPM++ 2M Beta",
                "DPM++ 2M Exponential",
                "DPM++ 2M Karras",
                "DPM++ 2M SDE",
                "DPM++ 2M SDE Beta",
                "DPM++ 2M SDE Exponential",
                "DPM++ 2M SDE Karras",
                "DPM++ 2M SDE Uniform",
                "DPM++ 2M Uniform",
                "DPM++ 3M",
                "DPM++ 3M Beta",
                "DPM++ 3M Exponential",
                "DPM++ 3M Karras",
                "DPM++ 3M SDE Uniform",
                "DPM++ 3M Uniform",
                "DPM++ Beta",
                "DPM++ Exponential",
                "DPM++ Karras",
                "DPM++ SDE",
                "DPM++ SDE Beta",
                "DPM++ SDE Exponential",
                "DPM++ SDE Karras",
                "DPM++ Uniform",
                "DPM++ Uniform Beta",
                "DPM++ Uniform Exponential",
                "DPM++ Uniform Karras",
                "DPMSolverMultistepInverse",
                "DPMSolverMultistepScheduler",
                "DPMSolverSinglestepScheduler",
                "EDMDPMSolverMultistepScheduler",
                "EDMEulerScheduler",
                "Euler",
                "Euler a",
                "Euler Beta",
                "Euler DiscreteScheduler",
                "Euler Exponential",
                "Euler Karras",
                "EulerAncestralDiscreteScheduler",
                "FlowMatchEulerDiscreteScheduler",
                "Heun",
                "HeunDiscreteScheduler",
                "Heun Karras",
                "IPNDMScheduler",
                "IPNDM Uniform",
                "IPNDM Uniform Beta",
                "IPNDM Uniform Exponential",
                "IPNDM Uniform Karras",
                "KDPM2AncestralDiscreteScheduler",
                "KDPM2DiscreteScheduler",
                "LCM",
                "LCMScheduler",
                "LMS",
                "LMSDiscreteScheduler",
                "LMS Karras",
                "PNDMScheduler",
                "TCDScheduler",
                "UniPC",
                "UniPC 2M",
                "UniPC 2M Karras",
                "UniPC 2M Uniform",
                "UniPC 3M",
                "UniPC 3M Karras",
                "UniPC 3M Uniform",
                "UniPC Karras",
                "UniPC Uniform",
                "UniPC Uniform Beta",
                "UniPC Uniform Exponential",
                "UniPC Uniform Karras"
              ]
            },
            "CFGScale": {
              "title": "CFG Scale",
              "description": "Guidance scale representing how closely the output will resemble the prompt. Higher values produce results more aligned with the prompt.",
              "type": "number",
              "multipleOf": 0.01,
              "minimum": 0,
              "maximum": 20
            },
            "strength": {
              "title": "Strength",
              "description": "Strength of the transformation. Lower values result in more influence from the original input.",
              "type": "number",
              "multipleOf": 0.01,
              "minimum": 0,
              "maximum": 1,
              "default": 0.8
            },
            "maskMargin": {
              "title": "Mask Margin",
              "description": "Extra context pixels around the masked region during inpainting. The model zooms into the masked area with these additional pixels for better integration.",
              "type": "integer",
              "minimum": 32,
              "maximum": 128
            },
            "acceleratorOptions": {
              "title": "Accelerator Options",
              "description": "Advanced caching mechanisms to speed up generation.",
              "type": "object",
              "properties": {
                "cacheEndStep": {
                  "title": "Cache End Step",
                  "description": "Absolute step number to end caching. Must be greater than `cacheStartStep` and less than or equal to `steps`.",
                  "type": "integer",
                  "minimum": 1
                },
                "cacheEndStepPercentage": {
                  "title": "Cache End Step Percentage",
                  "description": "Percentage of steps to end caching. Alternative to `cacheEndStep`. Must be greater than `cacheStartStepPercentage`.",
                  "type": "integer",
                  "minimum": 1,
                  "maximum": 100
                },
                "cacheMaxConsecutiveSteps": {
                  "title": "Cache Max Consecutive Steps",
                  "description": "Limits the maximum number of consecutive steps that can use cached computations before forcing a fresh computation.",
                  "type": "integer",
                  "minimum": 1,
                  "maximum": 5,
                  "default": 3
                },
                "cacheStartStep": {
                  "title": "Cache Start Step",
                  "description": "Absolute step number to start caching. Must be less than `cacheEndStep`.",
                  "type": "integer",
                  "minimum": 0
                },
                "cacheStartStepPercentage": {
                  "title": "Cache Start Step Percentage",
                  "description": "Percentage of steps to start caching. Alternative to `cacheStartStep`. Must be less than `cacheEndStepPercentage`.",
                  "type": "integer",
                  "minimum": 0,
                  "maximum": 99
                },
                "fbCache": {
                  "title": "FB Cache",
                  "description": "First Block Cache (FBCache) acceleration. Reuses feature block computations across steps.",
                  "type": "boolean",
                  "default": false
                },
                "fbCacheThreshold": {
                  "title": "FB Cache Threshold",
                  "description": "Controls the sensitivity threshold for determining when to reuse cached computations. Lower values reuse more aggressively.",
                  "type": "number",
                  "multipleOf": 0.01,
                  "minimum": 0,
                  "maximum": 1,
                  "default": 0.25
                },
                "teaCache": {
                  "title": "TeaCache",
                  "description": "TeaCache acceleration for transformer-based models. Estimates step differences to skip redundant computations.",
                  "type": "boolean",
                  "default": false
                },
                "teaCacheDistance": {
                  "title": "TeaCache Distance",
                  "description": "Controls the aggressiveness of the TeaCache feature. Lower values prioritize quality, higher values prioritize speed.",
                  "type": "number",
                  "multipleOf": 0.01,
                  "minimum": 0,
                  "maximum": 1,
                  "default": 0.5
                },
                "dbCache": {
                  "title": "DB Cache",
                  "description": "DB Cache (CacheDiT) acceleration. Caches and reuses intermediate transformer block outputs to skip redundant computations.",
                  "type": "boolean",
                  "default": false
                },
                "dbCacheThreshold": {
                  "title": "DB Cache Threshold",
                  "description": "Controls the sensitivity threshold for DB Cache. Lower values reuse cached blocks more aggressively, higher values prioritize quality.",
                  "type": "number",
                  "multipleOf": 0.01,
                  "minimum": 0,
                  "maximum": 1,
                  "default": 0.25
                },
                "dbCacheSkipInterval": {
                  "title": "DB Cache Skip Interval",
                  "description": "Controls how many steps to skip between cache refreshes. Higher values skip more steps for faster generation at the cost of quality.",
                  "type": "integer",
                  "minimum": 1,
                  "default": 5
                }
              },
              "allOf": [
                {
                  "not": {
                    "required": [
                      "cacheStartStep",
                      "cacheStartStepPercentage"
                    ]
                  }
                },
                {
                  "not": {
                    "required": [
                      "cacheEndStep",
                      "cacheEndStepPercentage"
                    ]
                  }
                }
              ],
              "additionalProperties": false
            },
            "outpaint": {
              "title": "Outpaint",
              "description": "Extends image boundaries in specified directions. Final width/height must account for original image plus extensions.",
              "type": "object",
              "properties": {
                "bottom": {
                  "title": "Outpaint Bottom",
                  "description": "Number of pixels to extend to the bottom.",
                  "type": "integer",
                  "minimum": 0
                },
                "left": {
                  "title": "Outpaint Left",
                  "description": "Number of pixels to extend to the left.",
                  "type": "integer",
                  "minimum": 0
                },
                "right": {
                  "title": "Outpaint Right",
                  "description": "Number of pixels to extend to the right.",
                  "type": "integer",
                  "minimum": 0
                },
                "top": {
                  "title": "Outpaint Top",
                  "description": "Number of pixels to extend to the top.",
                  "type": "integer",
                  "minimum": 0
                }
              },
              "additionalProperties": false
            },
            "lora": {
              "title": "LoRA",
              "description": "With LoRA (Low-Rank Adaptation), you can adapt a model to specific styles or features by emphasizing particular aspects of the data. This technique enhances the quality and relevance of generated content and can be especially useful when the output needs to adhere to a specific artistic style or follow particular guidelines.\n\nMultiple LoRA models can be used simultaneously to achieve different adaptation goals.\n\n**Examples**:\n\n```json\n\"lora\": [\n  {\n    \"model\": \"<lora-model-air>\",\n    \"weight\": 0.8\n  }\n]\n```\n\n[Read full documentation](https://runware.ai/docs/models/alibaba-z-image-turbo#lora)",
              "type": "array",
              "minItems": 1,
              "items": {
                "type": "object",
                "properties": {
                  "model": {
                    "title": "LoRA Model",
                    "description": "LoRA model identifier.",
                    "type": "string",
                    "pattern": "^[a-zA-Z0-9._-]+(:[a-zA-Z0-9._/@-]+)?$"
                  },
                  "weight": {
                    "title": "LoRA Weight",
                    "description": "Strength of the LoRA influence. A value of 0 means no influence. Higher values increase the influence, and negative values can be used to steer away from the LoRA's style.",
                    "type": "number",
                    "multipleOf": 0.01,
                    "minimum": -4,
                    "maximum": 4,
                    "default": 1
                  },
                  "transformer": {
                    "title": "LoRA Transformer",
                    "description": "Transformer stages to apply LoRA. Some video models use separate high-noise and low-noise processing stages, and LoRAs can be selectively applied to optimize their effectiveness.",
                    "type": "string",
                    "oneOf": [
                      {
                        "const": "high",
                        "title": "High",
                        "description": "Apply LoRA only to the high-noise processing stage (coarse structure and early generation steps)."
                      },
                      {
                        "const": "low",
                        "title": "Low",
                        "description": "Apply LoRA only to the low-noise processing stage (fine details and later generation steps)."
                      },
                      {
                        "const": "both",
                        "title": "Both",
                        "description": "Apply LoRA to both stages for full coverage."
                      }
                    ],
                    "default": "both"
                  }
                },
                "required": [
                  "model"
                ],
                "additionalProperties": false
              }
            },
            "controlNet": {
              "title": "ControlNet",
              "description": "With ControlNet, you can provide a guide image to help the model generate images that align with the desired structure. This guide image can be generated with our ControlNet preprocessing tool, extracting guidance information from an input image. The guide image can be in the form of an edge map, a pose, a depth estimation or any other type of control image that guides the generation process via the ControlNet model.\n\nMultiple ControlNet models can be used at the same time to provide different types of guidance information to the model.\n\n**Examples**:\n\n```json\n\"controlNet\": [\n  {\n    \"model\": \"<controlnet-model-air>\",\n    \"guideImage\": \"c64351d5-4c59-42f7-95e1-eace013eddab\",\n    \"weight\": 0.7,\n    \"startStep\": 0,\n    \"endStep\": 20,\n    \"controlMode\": \"controlnet\"\n  }\n]\n```\n\n[Read full documentation](https://runware.ai/docs/models/alibaba-z-image-turbo#controlNet)",
              "type": "array",
              "minItems": 1,
              "items": {
                "type": "object",
                "properties": {
                  "model": {
                    "title": "ControlNet Model",
                    "description": "ControlNet model identifier.",
                    "type": "string",
                    "pattern": "^[a-zA-Z0-9._-]+(:[a-zA-Z0-9._/@-]+)?$"
                  },
                  "weight": {
                    "title": "ControlNet Weight",
                    "description": "Strength of the ControlNet influence. A value of 0 means no influence. Higher values increase the influence, and negative values can be used to steer away from the guide image.",
                    "type": "number",
                    "multipleOf": 0.01,
                    "minimum": -4,
                    "maximum": 4,
                    "default": 1
                  },
                  "guideImage": {
                    "title": "Guide Image",
                    "description": "Reference image for ControlNet guidance (UUID, URL, Data URI, or Base64).",
                    "type": "string",
                    "anyOf": [
                      {
                        "format": "uuid"
                      },
                      {
                        "format": "uri"
                      },
                      {
                        "pattern": "^data:image\\/[a-zA-Z]+;base64,[a-zA-Z0-9+/=]+$"
                      },
                      {
                        "pattern": "^[a-zA-Z0-9+/=]+$"
                      }
                    ]
                  },
                  "controlMode": {
                    "title": "Control Mode",
                    "description": "ControlNet guidance mode.",
                    "type": "string",
                    "default": "balanced",
                    "oneOf": [
                      {
                        "const": "balanced",
                        "description": "Equal weight between ControlNet and prompt."
                      },
                      {
                        "const": "controlnet",
                        "description": "Prioritize ControlNet guidance."
                      },
                      {
                        "const": "prompt",
                        "description": "Prioritize prompt guidance."
                      }
                    ]
                  },
                  "endStep": {
                    "title": "ControlNet End Step",
                    "description": "Absolute step number to end ControlNet influence. Must be greater than `startStep` and less than or equal to `steps`.",
                    "type": "integer",
                    "minimum": 1
                  },
                  "endStepPercentage": {
                    "title": "End Step Percentage",
                    "description": "Percentage of steps to end ControlNet influence. Must be greater than `startStepPercentage`.",
                    "type": "integer",
                    "minimum": 1,
                    "maximum": 100
                  },
                  "startStep": {
                    "title": "Start Step",
                    "description": "Absolute step number to start ControlNet influence. Must be less than `endStep`.",
                    "type": "integer",
                    "minimum": 0
                  },
                  "startStepPercentage": {
                    "title": "Start Step Percentage",
                    "description": "Percentage of steps to start ControlNet influence. Must be less than `endStepPercentage`.",
                    "type": "integer",
                    "minimum": 0,
                    "maximum": 99
                  }
                },
                "required": [
                  "model",
                  "guideImage"
                ],
                "allOf": [
                  {
                    "not": {
                      "required": [
                        "startStep",
                        "startStepPercentage"
                      ]
                    }
                  },
                  {
                    "not": {
                      "required": [
                        "endStep",
                        "endStepPercentage"
                      ]
                    }
                  }
                ],
                "additionalProperties": false
              }
            },
            "ultralytics": {
              "title": "Ultralytics Features",
              "description": "Configuration object for Ultralytics face enhancement during generation. This feature uses face detection and inpainting to improve facial details in the same generation step, without requiring post-processing.\n\n> [!NOTE]\n> Face enhancement is available for Stable Diffusion 1.X, SDXL, and FLUX models. The system automatically detects faces and applies targeted refinement to improve quality while maintaining consistency with the overall generation.\n\n[Read full documentation](https://runware.ai/docs/models/alibaba-z-image-turbo#ultralytics)",
              "type": "object",
              "properties": {
                "CFGScale": {
                  "title": "CFG Scale",
                  "description": "Face refinement guidance scale.",
                  "type": "number",
                  "multipleOf": 0.1,
                  "minimum": 0,
                  "maximum": 50,
                  "default": 8
                },
                "confidence": {
                  "title": "Confidence",
                  "description": "Confidence threshold for detection.",
                  "type": "number",
                  "multipleOf": 0.01,
                  "minimum": 0,
                  "maximum": 1,
                  "default": 0.9
                },
                "maskBlur": {
                  "title": "Mask Blur",
                  "description": "Mask feathering amount. Higher values create softer transitions between the enhanced face region and surrounding areas.",
                  "type": "integer",
                  "minimum": 0,
                  "maximum": 100,
                  "default": 5
                },
                "maskPadding": {
                  "title": "Mask Padding",
                  "description": "Padding around detected face in pixels. Expands the refinement area to include surrounding context like hair and neck.",
                  "type": "integer",
                  "minimum": 0,
                  "maximum": 20,
                  "default": 5
                },
                "negativePrompt": {
                  "title": "Negative Prompt",
                  "description": "Negative prompt for detection.",
                  "type": "string"
                },
                "positivePrompt": {
                  "title": "Positive Prompt",
                  "description": "Positive prompt for detection.",
                  "type": "string"
                },
                "steps": {
                  "title": "Steps",
                  "description": "Number of face refinement steps.",
                  "type": "integer",
                  "minimum": 1,
                  "maximum": 100,
                  "default": 20
                },
                "strength": {
                  "title": "Strength",
                  "description": "Refinement strength. Lower values preserve more of the original, higher values allow more aggressive reconstruction.",
                  "type": "number",
                  "multipleOf": 0.01,
                  "minimum": 0,
                  "maximum": 1,
                  "default": 0.3
                }
              },
              "additionalProperties": false
            }
          }
        },
        "description": "You must always POST an array of task objects."
      },
      "ResponseBody": {
        "type": "object",
        "properties": {
          "data": {
            "type": "array",
            "items": {
              "type": "object",
              "description": "Unknown response structure"
            }
          }
        }
      },
      "ErrorResponse": {
        "title": "Error Response",
        "description": "Standard error response returned by the Runware API.",
        "type": "object",
        "properties": {
          "errors": {
            "type": "array",
            "items": {
              "type": "object",
              "required": [
                "code",
                "message"
              ],
              "additionalProperties": true,
              "properties": {
                "code": {
                  "type": "string",
                  "description": "A short identifier for the error (e.g., invalidApiKey, timeoutProvider)."
                },
                "message": {
                  "type": "string",
                  "description": "A human-readable explanation of what went wrong."
                },
                "parameter": {
                  "type": "string",
                  "description": "The request parameter related to the error, if applicable."
                },
                "taskType": {
                  "type": "string",
                  "description": "The task type of the request that failed."
                },
                "taskUUID": {
                  "type": "string",
                  "description": "The unique identifier of the failed request."
                },
                "documentation": {
                  "type": "string",
                  "description": "A link to relevant documentation."
                }
              }
            }
          }
        },
        "required": [
          "errors"
        ],
        "additionalProperties": false
      }
    }
  },
  "paths": {
    "/": {
      "post": {
        "summary": "Run alibaba-z-image-turbo",
        "description": "Z-Image-Turbo is a distilled vision model for sub second image generation. It produces sharp photorealistic results and supports accurate Chinese text and English text inside images. It follows complex layout instructions with stable structure for UI, posters, and scenes.",
        "operationId": "run_alibaba_z_image_turbo",
        "requestBody": {
          "required": true,
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/RequestBody"
              }
            }
          }
        },
        "responses": {
          "200": {
            "description": "Successful response",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ResponseBody"
                }
              }
            }
          },
          "400": {
            "description": "Bad Request — Missing or invalid parameters.",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                }
              }
            }
          },
          "401": {
            "description": "Unauthorized — No valid API key provided.",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                }
              }
            }
          },
          "402": {
            "description": "Payment Required — Insufficient account balance.",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                }
              }
            }
          },
          "403": {
            "description": "Forbidden — The API key lacks permissions for this request.",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                }
              }
            }
          },
          "429": {
            "description": "Too Many Requests — Rate limit exceeded.",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                }
              }
            }
          },
          "500": {
            "description": "Server Error — Something went wrong on Runware's end.",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                }
              }
            }
          },
          "503": {
            "description": "Service Unavailable — Temporarily unavailable (maintenance or capacity).",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                }
              }
            }
          }
        }
      }
    }
  },
  "security": [
    {
      "apiKeyAuth": []
    }
  ]
}