diff --git a/.sdk.json b/.sdk.json
index 07347e0..3f19ce7 100644
--- a/.sdk.json
+++ b/.sdk.json
@@ -1,5 +1,5 @@
{
- "id": "a99332c4-395a-42e0-8a2d-833989f9bf84",
+ "id": "1be4520e-0bec-4a18-81cf-2488a9d303f8",
"tracked_paths": [
{
"editable": true,
@@ -209,6 +209,18 @@
"editable": true,
"path": "magic_hour/resources/v1/auto_subtitle_generator/client.py"
},
+ {
+ "editable": true,
+ "path": "magic_hour/resources/v1/body_swap/README.md"
+ },
+ {
+ "editable": true,
+ "path": "magic_hour/resources/v1/body_swap/__init__.py"
+ },
+ {
+ "editable": true,
+ "path": "magic_hour/resources/v1/body_swap/client.py"
+ },
{
"editable": true,
"path": "magic_hour/resources/v1/client.py"
@@ -453,6 +465,10 @@
"editable": false,
"path": "magic_hour/types/models/v1_auto_subtitle_generator_create_response.py"
},
+ {
+ "editable": false,
+ "path": "magic_hour/types/models/v1_body_swap_create_response.py"
+ },
{
"editable": false,
"path": "magic_hour/types/models/v1_face_detection_create_response.py"
@@ -689,6 +705,14 @@
"editable": false,
"path": "magic_hour/types/params/v1_auto_subtitle_generator_create_body_style_custom_config.py"
},
+ {
+ "editable": false,
+ "path": "magic_hour/types/params/v1_body_swap_create_body.py"
+ },
+ {
+ "editable": false,
+ "path": "magic_hour/types/params/v1_body_swap_create_body_assets.py"
+ },
{
"editable": false,
"path": "magic_hour/types/params/v1_face_detection_create_body.py"
@@ -865,6 +889,10 @@
"editable": false,
"path": "tests/test_v1_auto_subtitle_generator_client.py"
},
+ {
+ "editable": false,
+ "path": "tests/test_v1_body_swap_client.py"
+ },
{
"editable": false,
"path": "tests/test_v1_face_detection_client.py"
diff --git a/README.md b/README.md
index 120c153..9996b3a 100644
--- a/README.md
+++ b/README.md
@@ -238,6 +238,11 @@ download_urls = result.downloads
- [create](magic_hour/resources/v1/auto_subtitle_generator/README.md#create) - Auto Subtitle Generator
- [generate](magic_hour/resources/v1/auto_subtitle_generator/README.md#generate) - Auto Subtitle Generator Generate Workflow
+### [v1.body_swap](magic_hour/resources/v1/body_swap/README.md)
+
+- [create](magic_hour/resources/v1/body_swap/README.md#create) - Body Swap
+- [generate](magic_hour/resources/v1/body_swap/README.md#generate) - Body Swap Generate Workflow
+
### [v1.face_detection](magic_hour/resources/v1/face_detection/README.md)
- [create](magic_hour/resources/v1/face_detection/README.md#create) - Face Detection
diff --git a/magic_hour/README.md b/magic_hour/README.md
index e43b358..f3746b6 100644
--- a/magic_hour/README.md
+++ b/magic_hour/README.md
@@ -17,6 +17,7 @@
- [animation](resources/v1/animation/README.md) - animation
- [audio_projects](resources/v1/audio_projects/README.md) - audio_projects
- [auto_subtitle_generator](resources/v1/auto_subtitle_generator/README.md) - auto_subtitle_generator
+- [body_swap](resources/v1/body_swap/README.md) - body_swap
- [face_detection](resources/v1/face_detection/README.md) - face_detection
- [face_swap](resources/v1/face_swap/README.md) - face_swap
- [face_swap_photo](resources/v1/face_swap_photo/README.md) - face_swap_photo
diff --git a/magic_hour/environment.py b/magic_hour/environment.py
index aa59d34..552ffde 100644
--- a/magic_hour/environment.py
+++ b/magic_hour/environment.py
@@ -6,7 +6,7 @@ class Environment(enum.Enum):
"""Pre-defined base URLs for the API"""
ENVIRONMENT = "https://api.magichour.ai"
- MOCK_SERVER = "https://api.sideko.dev/v1/mock/magichour/magic-hour/0.61.1"
+ MOCK_SERVER = "https://api.sideko.dev/v1/mock/magichour/magic-hour/0.62.0"
def _get_base_url(
diff --git a/magic_hour/resources/v1/README.md b/magic_hour/resources/v1/README.md
index 466a0b8..eb6ab8e 100644
--- a/magic_hour/resources/v1/README.md
+++ b/magic_hour/resources/v1/README.md
@@ -17,6 +17,7 @@
- [animation](animation/README.md) - animation
- [audio_projects](audio_projects/README.md) - audio_projects
- [auto_subtitle_generator](auto_subtitle_generator/README.md) - auto_subtitle_generator
+- [body_swap](body_swap/README.md) - body_swap
- [face_detection](face_detection/README.md) - face_detection
- [face_swap](face_swap/README.md) - face_swap
- [face_swap_photo](face_swap_photo/README.md) - face_swap_photo
diff --git a/magic_hour/resources/v1/ai_image_generator/README.md b/magic_hour/resources/v1/ai_image_generator/README.md
index e033a0e..543d78f 100644
--- a/magic_hour/resources/v1/ai_image_generator/README.md
+++ b/magic_hour/resources/v1/ai_image_generator/README.md
@@ -75,18 +75,18 @@ Create an AI image with advanced model selection and quality controls.
#### Parameters
-| Parameter | Required | Deprecated | Description | Example |
-| ----------------- | :------: | :--------: | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------- |
-| `image_count` | ✓ | ✗ | Number of images to generate. Maximum varies by model. | `1` |
-| `style` | ✓ | ✗ | The art style to use for image generation. | `{"prompt": "Cool image", "tool": "ai-anime-generator"}` |
-| `└─ prompt` | ✓ | — | The prompt used for the image(s). | `"Cool image"` |
-| `└─ quality_mode` | ✗ | ✓ | DEPRECATED: Use `model` field instead for explicit model selection. Legacy quality mode mapping: - `standard` → `z-image-turbo` model - `pro` → `seedream-v4` model If model is specified, it will take precedence over the legacy quality_mode field. | `"pro"` |
-| `└─ tool` | ✗ | — | The art style to use for image generation. Defaults to 'general' if not provided. | `"ai-anime-generator"` |
-| `aspect_ratio` | ✗ | ✗ | The aspect ratio of the output image(s). If not specified, defaults to `1:1` (square). | `"1:1"` |
-| `model` | ✗ | ✗ | The AI model to use for image generation. Each model has different capabilities and costs. **Models:** - `default` - Use the model we recommend, which will change over time. This is recommended unless you need a specific model. This is the default behavior. - `flux-schnell` - from 5 credits/image - Supported resolutions: 640px, 1k, 2k - Available for tiers: free, creator, pro, business - Image count allowed: 1, 2, 3, 4 - `z-image-turbo` - from 5 credits/image - Supported resolutions: 640px, 1k, 2k - Available for tiers: free, creator, pro, business - Image count allowed: 1, 2, 3, 4 - `seedream-v4` - from 40 credits/image - Supported resolutions: 640px, 1k, 2k, 4k - Available for tiers: free, creator, pro, business - Image count allowed: 1, 2, 3, 4 - `nano-banana` - from 50 credits/image - Supported resolutions: 640px, 1k - Available for tiers: free, creator, pro, business - Image count allowed: 1, 2, 3, 4 - `nano-banana-2` - from 100 credits/image - Supported resolutions: 640px, 1k, 2k, 4k - Available for tiers: free, creator, pro, business - Image count allowed: 1, 2, 3, 4 - `nano-banana-pro` - from 150 credits/image - Supported resolutions: 1k, 2k, 4k - Available for tiers: creator, pro, business - Image count allowed: 1, 4, 9, 16 **Deprecated Enum Values:** - `seedream` - Use `seedream-v4` instead. | `"default"` |
-| `name` | ✗ | ✗ | Give your image a custom name for easy identification. | `"My Ai Image image"` |
-| `orientation` | ✗ | ✓ | DEPRECATED: Use `aspect_ratio` instead. The orientation of the output image(s). `aspect_ratio` takes precedence when `orientation` if both are provided. | `"landscape"` |
-| `resolution` | ✗ | ✗ | Maximum resolution (longest edge) for the output image. **Options:** - `640px` — up to 640px - `1k` — up to 1024px - `2k` — up to 2048px - `4k` — up to 4096px - `auto` — **Deprecated.** Mapped server-side from your subscription tier to the best matching resolution the model supports **Per-model support:** - `flux-schnell` - 640px, 1k, 2k - `z-image-turbo` - 640px, 1k, 2k - `seedream-v4` - 640px, 1k, 2k, 4k - `nano-banana` - 640px, 1k - `nano-banana-2` - 640px, 1k, 2k, 4k - `nano-banana-pro` - 1k, 2k, 4k Note: Resolution availability depends on the model and your subscription tier. | `"auto"` |
+| Parameter | Required | Deprecated | Description | Example |
+| ----------------- | :------: | :--------: | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------- |
+| `image_count` | ✓ | ✗ | Number of images to generate. Maximum varies by model. | `1` |
+| `style` | ✓ | ✗ | The art style to use for image generation. | `{"prompt": "Cool image", "tool": "ai-anime-generator"}` |
+| `└─ prompt` | ✓ | — | The prompt used for the image(s). | `"Cool image"` |
+| `└─ quality_mode` | ✗ | ✓ | DEPRECATED: Use `model` field instead for explicit model selection. Legacy quality mode mapping: - `standard` → `z-image-turbo` model - `pro` → `seedream-v4` model If model is specified, it will take precedence over the legacy quality_mode field. | `"pro"` |
+| `└─ tool` | ✗ | — | The art style to use for image generation. Defaults to 'general' if not provided. | `"ai-anime-generator"` |
+| `aspect_ratio` | ✗ | ✗ | The aspect ratio of the output image(s). If not specified, defaults to `1:1` (square). | `"1:1"` |
+| `model` | ✗ | ✗ | The AI model to use for image generation. Each model has different capabilities and costs. **Models:** - `default` - Use the model we recommend, which will change over time. This is recommended unless you need a specific model. This is the default behavior. - `flux-schnell` - from 5 credits/image - Supported resolutions: 640px, 1k, 2k - Available for tiers: free, creator, pro, business - Image count allowed: 1, 2, 3, 4 - `z-image-turbo` - from 5 credits/image - Supported resolutions: 640px, 1k, 2k - Available for tiers: free, creator, pro, business - Image count allowed: 1, 2, 3, 4 - `seedream-v4` - from 40 credits/image - Supported resolutions: 640px, 1k, 2k, 4k - Available for tiers: free, creator, pro, business - Image count allowed: 1, 2, 3, 4 - `nano-banana` - from 50 credits/image - Supported resolutions: 640px, 1k - Available for tiers: free, creator, pro, business - Image count allowed: 1, 2, 3, 4 - `nano-banana-2` - from 100 credits/image - Supported resolutions: 640px, 1k, 2k, 4k - Available for tiers: free, creator, pro, business - Image count allowed: 1, 4, 9, 16 - `nano-banana-pro` - from 150 credits/image - Supported resolutions: 1k, 2k, 4k - Available for tiers: creator, pro, business - Image count allowed: 1, 4, 9, 16 **Deprecated Enum Values:** - `seedream` - Use `seedream-v4` instead. | `"default"` |
+| `name` | ✗ | ✗ | Give your image a custom name for easy identification. | `"My Ai Image image"` |
+| `orientation` | ✗ | ✓ | DEPRECATED: Use `aspect_ratio` instead. The orientation of the output image(s). `aspect_ratio` takes precedence when `orientation` if both are provided. | `"landscape"` |
+| `resolution` | ✗ | ✗ | Maximum resolution (longest edge) for the output image. **Options:** - `640px` — up to 640px - `1k` — up to 1024px - `2k` — up to 2048px - `4k` — up to 4096px - `auto` — **Deprecated.** Mapped server-side from your subscription tier to the best matching resolution the model supports **Per-model support:** - `flux-schnell` - 640px, 1k, 2k - `z-image-turbo` - 640px, 1k, 2k - `seedream-v4` - 640px, 1k, 2k, 4k - `nano-banana` - 640px, 1k - `nano-banana-2` - 640px, 1k, 2k, 4k - `nano-banana-pro` - 1k, 2k, 4k Note: Resolution availability depends on the model and your subscription tier. | `"auto"` |
#### Synchronous Client
diff --git a/magic_hour/resources/v1/ai_image_generator/client.py b/magic_hour/resources/v1/ai_image_generator/client.py
index aca9b77..93d546b 100644
--- a/magic_hour/resources/v1/ai_image_generator/client.py
+++ b/magic_hour/resources/v1/ai_image_generator/client.py
@@ -191,7 +191,7 @@ def create(
- `nano-banana-2` - from 100 credits/image
- Supported resolutions: 640px, 1k, 2k, 4k
- Available for tiers: free, creator, pro, business
- - Image count allowed: 1, 2, 3, 4
+ - Image count allowed: 1, 4, 9, 16
- `nano-banana-pro` - from 150 credits/image
- Supported resolutions: 1k, 2k, 4k
- Available for tiers: creator, pro, business
@@ -432,7 +432,7 @@ async def create(
- `nano-banana-2` - from 100 credits/image
- Supported resolutions: 640px, 1k, 2k, 4k
- Available for tiers: free, creator, pro, business
- - Image count allowed: 1, 2, 3, 4
+ - Image count allowed: 1, 4, 9, 16
- `nano-banana-pro` - from 150 credits/image
- Supported resolutions: 1k, 2k, 4k
- Available for tiers: creator, pro, business
diff --git a/magic_hour/resources/v1/body_swap/README.md b/magic_hour/resources/v1/body_swap/README.md
new file mode 100644
index 0000000..9f82b51
--- /dev/null
+++ b/magic_hour/resources/v1/body_swap/README.md
@@ -0,0 +1,129 @@
+# v1.body_swap
+
+## Module Functions
+
+
+
+### Body Swap Generate Workflow
+
+The workflow performs the following actions
+
+1. upload local assets to Magic Hour storage. So you can pass in a local path instead of having to upload files yourself
+2. trigger a generation
+3. poll for a completion status. This is configurable
+4. if success, download the output to local directory
+
+> [!TIP]
+> This is the recommended way to use the SDK unless you have specific needs where it is necessary to split up the actions.
+
+#### Parameters
+
+In addition to the parameters listed in the `.create` section below, `.generate` introduces 3 new parameters:
+
+- `wait_for_completion` (bool, default True): Whether to wait for the project to complete.
+- `download_outputs` (bool, default True): Whether to download the generated files
+- `download_directory` (str, optional): Directory to save downloaded files (defaults to current directory)
+
+#### Synchronous Client
+
+```python
+from magic_hour import Client
+from os import getenv
+
+client = Client(token=getenv("API_TOKEN"))
+res = client.v1.body_swap.generate(
+ assets={
+ "person_file_path": "/path/to/person.png",
+ "scene_file_path": "/path/to/scene.png",
+ },
+ resolution="1k",
+ name="My Body Swap image",
+ wait_for_completion=True,
+ download_outputs=True,
+ download_directory=".",
+)
+```
+
+#### Asynchronous Client
+
+```python
+from magic_hour import AsyncClient
+from os import getenv
+
+client = AsyncClient(token=getenv("API_TOKEN"))
+res = await client.v1.body_swap.generate(
+ assets={
+ "person_file_path": "/path/to/person.png",
+ "scene_file_path": "/path/to/scene.png",
+ },
+ resolution="1k",
+ name="My Body Swap image",
+ wait_for_completion=True,
+ download_outputs=True,
+ download_directory=".",
+)
+```
+
+
+
+### Body Swap
+
+Swap a person into a scene image using Nano Banana 2. Credits depend on `resolution` (from 100 credits at 640px upward).
+
+**API Endpoint**: `POST /v1/body-swap`
+
+#### Parameters
+
+| Parameter | Required | Description | Example |
+| --------------------- | :------: | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- |
+| `assets` | ✓ | Person image and scene image for body swap | `{"person_file_path": "api-assets/id/1234.png", "scene_file_path": "api-assets/id/5678.png"}` |
+| `└─ person_file_path` | ✓        | Image of the person to place into the scene. This value is either - a direct URL to the image file - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls). See the [file upload guide](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) for details.   | `"api-assets/id/1234.png"`                                                                    |
+| `└─ scene_file_path`  | ✓        | Target scene image (background). This value is either - a direct URL to the image file - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls). See the [file upload guide](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) for details.             | `"api-assets/id/5678.png"`                                                                    |
+| `resolution` | ✓ | Output resolution. Determines credits charged for the run. | `"1k"` |
+| `name` | ✗ | Give your image a custom name for easy identification. | `"My Body Swap image"` |
+
+#### Synchronous Client
+
+```python
+from magic_hour import Client
+from os import getenv
+
+client = Client(token=getenv("API_TOKEN"))
+res = client.v1.body_swap.create(
+ assets={
+ "person_file_path": "api-assets/id/1234.png",
+ "scene_file_path": "api-assets/id/5678.png",
+ },
+ resolution="1k",
+ name="My Body Swap image",
+)
+```
+
+#### Asynchronous Client
+
+```python
+from magic_hour import AsyncClient
+from os import getenv
+
+client = AsyncClient(token=getenv("API_TOKEN"))
+res = await client.v1.body_swap.create(
+ assets={
+ "person_file_path": "api-assets/id/1234.png",
+ "scene_file_path": "api-assets/id/5678.png",
+ },
+ resolution="1k",
+ name="My Body Swap image",
+)
+```
+
+#### Response
+
+##### Type
+
+[V1BodySwapCreateResponse](/magic_hour/types/models/v1_body_swap_create_response.py)
+
+##### Example
+
+```python
+{"credits_charged": 100, "frame_cost": 100, "id": "cuid-example"}
+```
diff --git a/magic_hour/resources/v1/body_swap/__init__.py b/magic_hour/resources/v1/body_swap/__init__.py
new file mode 100644
index 0000000..1150d7e
--- /dev/null
+++ b/magic_hour/resources/v1/body_swap/__init__.py
@@ -0,0 +1,4 @@
+from .client import AsyncBodySwapClient, BodySwapClient
+
+
+__all__ = ["AsyncBodySwapClient", "BodySwapClient"]
diff --git a/magic_hour/resources/v1/body_swap/client.py b/magic_hour/resources/v1/body_swap/client.py
new file mode 100644
index 0000000..faf71a1
--- /dev/null
+++ b/magic_hour/resources/v1/body_swap/client.py
@@ -0,0 +1,286 @@
+import typing
+import typing_extensions
+
+from magic_hour.helpers.logger import get_sdk_logger
+from magic_hour.resources.v1.files.client import AsyncFilesClient, FilesClient
+from magic_hour.resources.v1.image_projects.client import (
+ AsyncImageProjectsClient,
+ ImageProjectsClient,
+)
+from magic_hour.types import models, params
+from make_api_request import (
+ AsyncBaseClient,
+ RequestOptions,
+ SyncBaseClient,
+ default_request_options,
+ to_encodable,
+ type_utils,
+)
+
+logger = get_sdk_logger(__name__)
+
+
+class BodySwapClient:
+ def __init__(self, *, base_client: SyncBaseClient):
+ self._base_client = base_client
+
+ def generate(
+ self,
+ *,
+ assets: params.V1BodySwapCreateBodyAssets,
+ resolution: typing_extensions.Literal["1k", "2k", "4k", "640px"],
+ name: typing.Union[
+ typing.Optional[str], type_utils.NotGiven
+ ] = type_utils.NOT_GIVEN,
+ wait_for_completion: bool = True,
+ download_outputs: bool = True,
+ download_directory: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ):
+ """
+ Generate body swap image (alias for create with additional functionality).
+
+ Swap a person into a scene image using Nano Banana 2. Credits depend on `resolution` (from 100 credits at 640px upward).
+
+ Args:
+ name: Give your image a custom name for easy identification.
+ assets: Person image and scene image for body swap
+ resolution: Output resolution. Determines credits charged for the run.
+ wait_for_completion: Whether to wait for the image project to complete
+ download_outputs: Whether to download the outputs
+ download_directory: The directory to download the outputs to. If not provided, the outputs will be downloaded to the current working directory
+ request_options: Additional options to customize the HTTP request
+
+ Returns:
+ V1ImageProjectsGetResponseWithDownloads: The response from the Body Swap API with the downloaded paths if `download_outputs` is True.
+
+ Examples:
+ ```py
+ client.v1.body_swap.generate(
+ assets={
+ "person_file_path": "path/to/person.png",
+ "scene_file_path": "path/to/scene.png",
+ },
+ resolution="1k",
+ name="My Body Swap image",
+ wait_for_completion=True,
+ download_outputs=True,
+ download_directory=".",
+ )
+ ```
+ """
+
+ file_client = FilesClient(base_client=self._base_client)
+
+ person_file_path = assets["person_file_path"]
+ scene_file_path = assets["scene_file_path"]
+ assets["person_file_path"] = file_client.upload_file(file=person_file_path)
+ assets["scene_file_path"] = file_client.upload_file(file=scene_file_path)
+
+ create_response = self.create(
+ assets=assets,
+ resolution=resolution,
+ name=name,
+ request_options=request_options,
+ )
+ logger.info(f"Body Swap response: {create_response}")
+
+ image_projects_client = ImageProjectsClient(base_client=self._base_client)
+ response = image_projects_client.check_result(
+ id=create_response.id,
+ wait_for_completion=wait_for_completion,
+ download_outputs=download_outputs,
+ download_directory=download_directory,
+ )
+
+ return response
+
+ def create(
+ self,
+ *,
+ assets: params.V1BodySwapCreateBodyAssets,
+ resolution: typing_extensions.Literal["1k", "2k", "4k", "640px"],
+ name: typing.Union[
+ typing.Optional[str], type_utils.NotGiven
+ ] = type_utils.NOT_GIVEN,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> models.V1BodySwapCreateResponse:
+ """
+ Body Swap
+
+ Swap a person into a scene image using Nano Banana 2. Credits depend on `resolution` (from 100 credits at 640px upward).
+
+ POST /v1/body-swap
+
+ Args:
+ name: Give your image a custom name for easy identification.
+ assets: Person image and scene image for body swap
+ resolution: Output resolution. Determines credits charged for the run.
+ request_options: Additional options to customize the HTTP request
+
+ Returns:
+ Success
+
+ Raises:
+ ApiError: A custom exception class that provides additional context
+ for API errors, including the HTTP status code and response body.
+
+ Examples:
+ ```py
+ client.v1.body_swap.create(
+ assets={
+ "person_file_path": "api-assets/id/1234.png",
+ "scene_file_path": "api-assets/id/5678.png",
+ },
+ resolution="1k",
+ name="My Body Swap image",
+ )
+ ```
+ """
+ _json = to_encodable(
+ item={"name": name, "assets": assets, "resolution": resolution},
+ dump_with=params._SerializerV1BodySwapCreateBody,
+ )
+ return self._base_client.request(
+ method="POST",
+ path="/v1/body-swap",
+ auth_names=["bearerAuth"],
+ json=_json,
+ cast_to=models.V1BodySwapCreateResponse,
+ request_options=request_options or default_request_options(),
+ )
+
+
+class AsyncBodySwapClient:
+ def __init__(self, *, base_client: AsyncBaseClient):
+ self._base_client = base_client
+
+ async def generate(
+ self,
+ *,
+ assets: params.V1BodySwapCreateBodyAssets,
+ resolution: typing_extensions.Literal["1k", "2k", "4k", "640px"],
+ name: typing.Union[
+ typing.Optional[str], type_utils.NotGiven
+ ] = type_utils.NOT_GIVEN,
+ wait_for_completion: bool = True,
+ download_outputs: bool = True,
+ download_directory: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ):
+ """
+ Generate body swap image (alias for create with additional functionality).
+
+ Swap a person into a scene image using Nano Banana 2. Credits depend on `resolution` (from 100 credits at 640px upward).
+
+ Args:
+ name: Give your image a custom name for easy identification.
+ assets: Person image and scene image for body swap
+ resolution: Output resolution. Determines credits charged for the run.
+ wait_for_completion: Whether to wait for the image project to complete
+ download_outputs: Whether to download the outputs
+ download_directory: The directory to download the outputs to. If not provided, the outputs will be downloaded to the current working directory
+ request_options: Additional options to customize the HTTP request
+
+ Returns:
+ V1ImageProjectsGetResponseWithDownloads: The response from the Body Swap API with the downloaded paths if `download_outputs` is True.
+
+ Examples:
+ ```py
+ await client.v1.body_swap.generate(
+ assets={
+ "person_file_path": "path/to/person.png",
+ "scene_file_path": "path/to/scene.png",
+ },
+ resolution="1k",
+ name="My Body Swap image",
+ wait_for_completion=True,
+ download_outputs=True,
+ download_directory=".",
+ )
+ ```
+ """
+
+ file_client = AsyncFilesClient(base_client=self._base_client)
+
+ person_file_path = assets["person_file_path"]
+ scene_file_path = assets["scene_file_path"]
+ assets["person_file_path"] = await file_client.upload_file(
+ file=person_file_path
+ )
+ assets["scene_file_path"] = await file_client.upload_file(
+ file=scene_file_path
+ )
+
+ create_response = await self.create(
+ assets=assets,
+ resolution=resolution,
+ name=name,
+ request_options=request_options,
+ )
+ logger.info(f"Body Swap response: {create_response}")
+
+ image_projects_client = AsyncImageProjectsClient(base_client=self._base_client)
+ response = await image_projects_client.check_result(
+ id=create_response.id,
+ wait_for_completion=wait_for_completion,
+ download_outputs=download_outputs,
+ download_directory=download_directory,
+ )
+
+ return response
+
+ async def create(
+ self,
+ *,
+ assets: params.V1BodySwapCreateBodyAssets,
+ resolution: typing_extensions.Literal["1k", "2k", "4k", "640px"],
+ name: typing.Union[
+ typing.Optional[str], type_utils.NotGiven
+ ] = type_utils.NOT_GIVEN,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> models.V1BodySwapCreateResponse:
+ """
+ Body Swap
+
+ Swap a person into a scene image using Nano Banana 2. Credits depend on `resolution` (from 100 credits at 640px upward).
+
+ POST /v1/body-swap
+
+ Args:
+ name: Give your image a custom name for easy identification.
+ assets: Person image and scene image for body swap
+ resolution: Output resolution. Determines credits charged for the run.
+ request_options: Additional options to customize the HTTP request
+
+ Returns:
+ Success
+
+ Raises:
+ ApiError: A custom exception class that provides additional context
+ for API errors, including the HTTP status code and response body.
+
+ Examples:
+ ```py
+ await client.v1.body_swap.create(
+ assets={
+ "person_file_path": "api-assets/id/1234.png",
+ "scene_file_path": "api-assets/id/5678.png",
+ },
+ resolution="1k",
+ name="My Body Swap image",
+ )
+ ```
+ """
+ _json = to_encodable(
+ item={"name": name, "assets": assets, "resolution": resolution},
+ dump_with=params._SerializerV1BodySwapCreateBody,
+ )
+ return await self._base_client.request(
+ method="POST",
+ path="/v1/body-swap",
+ auth_names=["bearerAuth"],
+ json=_json,
+ cast_to=models.V1BodySwapCreateResponse,
+ request_options=request_options or default_request_options(),
+ )
diff --git a/magic_hour/resources/v1/client.py b/magic_hour/resources/v1/client.py
index 5223e40..b245ad7 100644
--- a/magic_hour/resources/v1/client.py
+++ b/magic_hour/resources/v1/client.py
@@ -55,6 +55,7 @@
AsyncAutoSubtitleGeneratorClient,
AutoSubtitleGeneratorClient,
)
+from magic_hour.resources.v1.body_swap import AsyncBodySwapClient, BodySwapClient
from magic_hour.resources.v1.face_detection import (
AsyncFaceDetectionClient,
FaceDetectionClient,
@@ -138,6 +139,7 @@ def __init__(self, *, base_client: SyncBaseClient):
self.ai_voice_generator = AiVoiceGeneratorClient(base_client=self._base_client)
self.ai_voice_cloner = AiVoiceClonerClient(base_client=self._base_client)
self.head_swap = HeadSwapClient(base_client=self._base_client)
+ self.body_swap = BodySwapClient(base_client=self._base_client)
class AsyncV1Client:
@@ -190,3 +192,4 @@ def __init__(self, *, base_client: AsyncBaseClient):
)
self.ai_voice_cloner = AsyncAiVoiceClonerClient(base_client=self._base_client)
self.head_swap = AsyncHeadSwapClient(base_client=self._base_client)
+ self.body_swap = AsyncBodySwapClient(base_client=self._base_client)
diff --git a/magic_hour/resources/v1/image_to_video/README.md b/magic_hour/resources/v1/image_to_video/README.md
index 4045f51..54f8e74 100644
--- a/magic_hour/resources/v1/image_to_video/README.md
+++ b/magic_hour/resources/v1/image_to_video/README.md
@@ -100,7 +100,7 @@ For detailed examples, see the [product page](https://magichour.ai/products/imag
| `└─ end_image_file_path` | ✗ | — | The image to use as the last frame of the video. * **`ltx-2`**: Not supported * **`wan-2.2`**: Not supported * **`seedance`**: Supports 480p, 720p, 1080p. * **`seedance-2.0`**: Supports 480p, 720p. * **`kling-2.5`**: Supports 1080p. * **`kling-3.0`**: Supports 1080p. * **`sora-2`**: Not supported * **`veo3.1`**: Not supported * **`veo3.1-lite`**: Not supported Legacy models: * **`kling-1.6`**: Not supported | `"api-assets/id/1234.png"` |
| `└─ image_file_path` | ✓ | — | The path of the image file. This value is either - a direct URL to the video file - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls). See the [file upload guide](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) for details. | `"api-assets/id/1234.png"` |
| `end_seconds` | ✓ | ✗ | The total duration of the output video in seconds. Supported durations depend on the chosen model: * **`ltx-2`**: 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30 * **`wan-2.2`**: 3, 4, 5, 6, 7, 8, 9, 10, 15 * **`seedance`**: 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 * **`seedance-2.0`**: 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 * **`kling-2.5`**: 5, 10 * **`kling-3.0`**: 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 * **`sora-2`**: 4, 8, 12, 24, 36, 48, 60 * **`veo3.1`**: 4, 6, 8, 16, 24, 32, 40, 48, 56 * **`veo3.1-lite`**: 8, 16, 24, 32, 40, 48, 56 Legacy models: * **`kling-1.6`**: 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60 | `5.0` |
-| `audio` | ✗ | ✗ | Whether to include audio in the video. Defaults to `false` if not specified. Audio support varies by model: * **`ltx-2`**: Automatically included with no extra credits * **`wan-2.2`**: Not supported * **`seedance`**: Not supported * **`seedance-2.0`**: Not supported * **`kling-2.5`**: Automatically included with no extra credits * **`kling-3.0`**: Toggle-able (can enable/disable) * **`sora-2`**: Automatically included with no extra credits * **`veo3.1`**: Toggle-able (can enable/disable) * **`veo3.1-lite`**: Toggle-able (can enable/disable) Legacy models: * **`kling-1.6`**: Not supported | `True` |
+| `audio` | ✗ | ✗ | Whether to include audio in the video. Defaults to `false` if not specified. Audio support varies by model: * **`ltx-2`**: Automatically included with no extra credits * **`wan-2.2`**: Not supported * **`seedance`**: Not supported * **`seedance-2.0`**: Automatically included with no extra credits * **`kling-2.5`**: Automatically included with no extra credits * **`kling-3.0`**: Toggle-able (can enable/disable) * **`sora-2`**: Automatically included with no extra credits * **`veo3.1`**: Toggle-able (can enable/disable) * **`veo3.1-lite`**: Toggle-able (can enable/disable) Legacy models: * **`kling-1.6`**: Not supported | `True` |
| `height` | ✗ | ✓ | `height` is deprecated and no longer influences the output video's resolution. This field is retained only for backward compatibility and will be removed in a future release. | `123` |
| `model` | ✗ | ✗ | The AI model to use for video generation. * `default`: uses our currently recommended model for general use. For paid tiers, defaults to `kling-3.0`. For free tiers, it defaults to `ltx-2`. * `ltx-2`: Fast iteration with audio and lip-sync * `wan-2.2`: Fast, strong visuals with effects * `seedance`: Fast iteration and start/end frames * `seedance-2.0`: State-of-the-art quality and consistency * `kling-2.5`: Motion, action, and camera control * `kling-3.0`: Cinematic, multi-scene storytelling * `sora-2`: Story-first concepts and creativity * `veo3.1`: Realistic visuals and prompt adherence * `veo3.1-lite`: Good for fast, affordable, high-quality daily generation. Legacy models: * `kling-1.6`: Reliable baseline with smooth motion If you specify the deprecated model value that includes the `-audio` suffix, this will be the same as included `audio` as `true`. | `"kling-3.0"` |
| `name` | ✗ | ✗ | Give your video a custom name for easy identification. | `"My Image To Video video"` |
diff --git a/magic_hour/resources/v1/image_to_video/client.py b/magic_hour/resources/v1/image_to_video/client.py
index 225548a..d4e560e 100644
--- a/magic_hour/resources/v1/image_to_video/client.py
+++ b/magic_hour/resources/v1/image_to_video/client.py
@@ -213,7 +213,7 @@ def create(
* **`ltx-2`**: Automatically included with no extra credits
* **`wan-2.2`**: Not supported
* **`seedance`**: Not supported
- * **`seedance-2.0`**: Not supported
+ * **`seedance-2.0`**: Automatically included with no extra credits
* **`kling-2.5`**: Automatically included with no extra credits
* **`kling-3.0`**: Toggle-able (can enable/disable)
* **`sora-2`**: Automatically included with no extra credits
@@ -516,7 +516,7 @@ async def create(
* **`ltx-2`**: Automatically included with no extra credits
* **`wan-2.2`**: Not supported
* **`seedance`**: Not supported
- * **`seedance-2.0`**: Not supported
+ * **`seedance-2.0`**: Automatically included with no extra credits
* **`kling-2.5`**: Automatically included with no extra credits
* **`kling-3.0`**: Toggle-able (can enable/disable)
* **`sora-2`**: Automatically included with no extra credits
diff --git a/magic_hour/resources/v1/text_to_video/README.md b/magic_hour/resources/v1/text_to_video/README.md
index 2d0b6bb..5a33366 100644
--- a/magic_hour/resources/v1/text_to_video/README.md
+++ b/magic_hour/resources/v1/text_to_video/README.md
@@ -103,7 +103,7 @@ For detailed examples, see the [product page](https://magichour.ai/products/text
| `└─ prompt` | ✓ | — | The prompt used for the video. | `"a dog running"` |
| `└─ quality_mode` | ✗ | ✓ | DEPRECATED: Please use `resolution` field instead. For backward compatibility: * `quick` maps to 720p resolution * `studio` maps to 1080p resolution This field will be removed in a future version. Use the `resolution` field to directly to specify the resolution. | `"quick"` |
| `aspect_ratio` | ✗ | ✗ | Determines the aspect ratio of the output video. * **`ltx-2`**: Supports 9:16, 16:9, 1:1. * **`wan-2.2`**: Supports 9:16, 16:9, 1:1. * **`seedance`**: Supports 9:16, 16:9, 1:1. * **`seedance-2.0`**: Supports 9:16, 16:9, 1:1. * **`kling-2.5`**: Supports 9:16, 16:9, 1:1. * **`kling-3.0`**: Supports 9:16, 16:9, 1:1. * **`sora-2`**: Supports 9:16, 16:9. * **`veo3.1`**: Supports 9:16, 16:9. * **`veo3.1-lite`**: Supports 9:16, 16:9. Legacy models: * **`kling-1.6`**: Supports 9:16, 16:9, 1:1. | `"16:9"` |
-| `audio` | ✗ | ✗ | Whether to include audio in the video. Defaults to `false` if not specified. Audio support varies by model: * **`ltx-2`**: Automatically included with no extra credits * **`wan-2.2`**: Not supported * **`seedance`**: Not supported * **`seedance-2.0`**: Not supported * **`kling-2.5`**: Automatically included with no extra credits * **`kling-3.0`**: Toggle-able (can enable/disable) * **`sora-2`**: Automatically included with no extra credits * **`veo3.1`**: Toggle-able (can enable/disable) * **`veo3.1-lite`**: Toggle-able (can enable/disable) Legacy models: * **`kling-1.6`**: Not supported | `True` |
+| `audio` | ✗ | ✗ | Whether to include audio in the video. Defaults to `false` if not specified. Audio support varies by model: * **`ltx-2`**: Automatically included with no extra credits * **`wan-2.2`**: Not supported * **`seedance`**: Not supported * **`seedance-2.0`**: Automatically included with no extra credits * **`kling-2.5`**: Automatically included with no extra credits * **`kling-3.0`**: Toggle-able (can enable/disable) * **`sora-2`**: Automatically included with no extra credits * **`veo3.1`**: Toggle-able (can enable/disable) * **`veo3.1-lite`**: Toggle-able (can enable/disable) Legacy models: * **`kling-1.6`**: Not supported | `True` |
| `model` | ✗ | ✗ | The AI model to use for video generation. * `default`: uses our currently recommended model for general use. For paid tiers, defaults to `kling-3.0`. For free tiers, it defaults to `ltx-2`. * `ltx-2`: Fast iteration with audio and lip-sync * `wan-2.2`: Fast, strong visuals with effects * `seedance`: Fast iteration and start/end frames * `seedance-2.0`: State-of-the-art quality and consistency * `kling-2.5`: Motion, action, and camera control * `kling-3.0`: Cinematic, multi-scene storytelling * `sora-2`: Story-first concepts and creativity * `veo3.1`: Realistic visuals and prompt adherence * `veo3.1-lite`: Good for fast, affordable, high-quality daily generation. Legacy models: * `kling-1.6`: Reliable baseline with smooth motion If you specify the deprecated model value that includes the `-audio` suffix, this will be the same as included `audio` as `true`. | `"kling-3.0"` |
| `name` | ✗ | ✗ | Give your video a custom name for easy identification. | `"My Text To Video video"` |
| `orientation` | ✗ | ✓ | Deprecated. Use `aspect_ratio` instead. | `"landscape"` |
diff --git a/magic_hour/resources/v1/text_to_video/client.py b/magic_hour/resources/v1/text_to_video/client.py
index a3da698..80bd090 100644
--- a/magic_hour/resources/v1/text_to_video/client.py
+++ b/magic_hour/resources/v1/text_to_video/client.py
@@ -220,7 +220,7 @@ def create(
* **`ltx-2`**: Automatically included with no extra credits
* **`wan-2.2`**: Not supported
* **`seedance`**: Not supported
- * **`seedance-2.0`**: Not supported
+ * **`seedance-2.0`**: Automatically included with no extra credits
* **`kling-2.5`**: Automatically included with no extra credits
* **`kling-3.0`**: Toggle-able (can enable/disable)
* **`sora-2`**: Automatically included with no extra credits
@@ -518,7 +518,7 @@ async def create(
* **`ltx-2`**: Automatically included with no extra credits
* **`wan-2.2`**: Not supported
* **`seedance`**: Not supported
- * **`seedance-2.0`**: Not supported
+ * **`seedance-2.0`**: Automatically included with no extra credits
* **`kling-2.5`**: Automatically included with no extra credits
* **`kling-3.0`**: Toggle-able (can enable/disable)
* **`sora-2`**: Automatically included with no extra credits
diff --git a/magic_hour/types/models/__init__.py b/magic_hour/types/models/__init__.py
index 2bd5012..1ddb7ce 100644
--- a/magic_hour/types/models/__init__.py
+++ b/magic_hour/types/models/__init__.py
@@ -21,6 +21,7 @@
from .v1_auto_subtitle_generator_create_response import (
V1AutoSubtitleGeneratorCreateResponse,
)
+from .v1_body_swap_create_response import V1BodySwapCreateResponse
from .v1_face_detection_create_response import V1FaceDetectionCreateResponse
from .v1_face_detection_get_response import V1FaceDetectionGetResponse
from .v1_face_detection_get_response_faces_item import (
@@ -72,6 +73,7 @@
"V1AudioProjectsGetResponseDownloadsItem",
"V1AudioProjectsGetResponseError",
"V1AutoSubtitleGeneratorCreateResponse",
+ "V1BodySwapCreateResponse",
"V1FaceDetectionCreateResponse",
"V1FaceDetectionGetResponse",
"V1FaceDetectionGetResponseFacesItem",
diff --git a/magic_hour/types/models/v1_body_swap_create_response.py b/magic_hour/types/models/v1_body_swap_create_response.py
new file mode 100644
index 0000000..5d74e92
--- /dev/null
+++ b/magic_hour/types/models/v1_body_swap_create_response.py
@@ -0,0 +1,33 @@
+import pydantic
+
+
+class V1BodySwapCreateResponse(pydantic.BaseModel):
+ """
+ Success
+ """
+
+ model_config = pydantic.ConfigDict(
+ arbitrary_types_allowed=True,
+ populate_by_name=True,
+ )
+
+ credits_charged: int = pydantic.Field(
+ alias="credits_charged",
+ )
+ """
+ The amount of credits deducted from your account to generate the image. We charge credits right when the request is made.
+
+ If an error occurred while generating the image(s), credits will be refunded and this field will be updated to include the refund.
+ """
+ frame_cost: int = pydantic.Field(
+ alias="frame_cost",
+ )
+ """
+ Deprecated: Previously represented the number of frames (original name of our credit system) used for image generation. Use 'credits_charged' instead.
+ """
+ id: str = pydantic.Field(
+ alias="id",
+ )
+ """
+ Unique ID of the image. Use it with the [Get image Project API](https://docs.magichour.ai/api-reference/image-projects/get-image-details) to fetch status and downloads.
+ """
diff --git a/magic_hour/types/params/__init__.py b/magic_hour/types/params/__init__.py
index 34e5542..05050e0 100644
--- a/magic_hour/types/params/__init__.py
+++ b/magic_hour/types/params/__init__.py
@@ -164,6 +164,14 @@
from .v1_auto_subtitle_generator_generate_body_assets import (
V1AutoSubtitleGeneratorGenerateBodyAssets,
)
+from .v1_body_swap_create_body import (
+ V1BodySwapCreateBody,
+ _SerializerV1BodySwapCreateBody,
+)
+from .v1_body_swap_create_body_assets import (
+ V1BodySwapCreateBodyAssets,
+ _SerializerV1BodySwapCreateBodyAssets,
+)
from .v1_face_detection_create_body import (
V1FaceDetectionCreateBody,
_SerializerV1FaceDetectionCreateBody,
@@ -340,6 +348,8 @@
"V1AutoSubtitleGeneratorCreateBodyStyle",
"V1AutoSubtitleGeneratorCreateBodyStyleCustomConfig",
"V1AutoSubtitleGeneratorGenerateBodyAssets",
+ "V1BodySwapCreateBody",
+ "V1BodySwapCreateBodyAssets",
"V1FaceDetectionCreateBody",
"V1FaceDetectionCreateBodyAssets",
"V1FaceDetectionGenerateBodyAssets",
@@ -416,6 +426,8 @@
"_SerializerV1AutoSubtitleGeneratorCreateBodyAssets",
"_SerializerV1AutoSubtitleGeneratorCreateBodyStyle",
"_SerializerV1AutoSubtitleGeneratorCreateBodyStyleCustomConfig",
+ "_SerializerV1BodySwapCreateBody",
+ "_SerializerV1BodySwapCreateBodyAssets",
"_SerializerV1FaceDetectionCreateBody",
"_SerializerV1FaceDetectionCreateBodyAssets",
"_SerializerV1FaceSwapCreateBody",
diff --git a/magic_hour/types/params/v1_ai_image_generator_create_body.py b/magic_hour/types/params/v1_ai_image_generator_create_body.py
index efe65bd..cc4c3b0 100644
--- a/magic_hour/types/params/v1_ai_image_generator_create_body.py
+++ b/magic_hour/types/params/v1_ai_image_generator_create_body.py
@@ -61,7 +61,7 @@ class V1AiImageGeneratorCreateBody(typing_extensions.TypedDict):
- `nano-banana-2` - from 100 credits/image
- Supported resolutions: 640px, 1k, 2k, 4k
- Available for tiers: free, creator, pro, business
- - Image count allowed: 1, 2, 3, 4
+ - Image count allowed: 1, 4, 9, 16
- `nano-banana-pro` - from 150 credits/image
- Supported resolutions: 1k, 2k, 4k
- Available for tiers: creator, pro, business
diff --git a/magic_hour/types/params/v1_body_swap_create_body.py b/magic_hour/types/params/v1_body_swap_create_body.py
new file mode 100644
index 0000000..093aac3
--- /dev/null
+++ b/magic_hour/types/params/v1_body_swap_create_body.py
@@ -0,0 +1,50 @@
+import pydantic
+import typing
+import typing_extensions
+
+from .v1_body_swap_create_body_assets import (
+ V1BodySwapCreateBodyAssets,
+ _SerializerV1BodySwapCreateBodyAssets,
+)
+
+
+class V1BodySwapCreateBody(typing_extensions.TypedDict):
+ """
+ V1BodySwapCreateBody
+ """
+
+ assets: typing_extensions.Required[V1BodySwapCreateBodyAssets]
+ """
+ Person image and scene image for body swap
+ """
+
+ name: typing_extensions.NotRequired[str]
+ """
+ Give your image a custom name for easy identification.
+ """
+
+ resolution: typing_extensions.Required[
+ typing_extensions.Literal["1k", "2k", "4k", "640px"]
+ ]
+ """
+ Output resolution. Determines credits charged for the run.
+ """
+
+
+class _SerializerV1BodySwapCreateBody(pydantic.BaseModel):
+ """
+ Serializer for V1BodySwapCreateBody handling case conversions
+ and file omissions as dictated by the API
+ """
+
+ model_config = pydantic.ConfigDict(
+ populate_by_name=True,
+ )
+
+ assets: _SerializerV1BodySwapCreateBodyAssets = pydantic.Field(
+ alias="assets",
+ )
+ name: typing.Optional[str] = pydantic.Field(alias="name", default=None)
+ resolution: typing_extensions.Literal["1k", "2k", "4k", "640px"] = pydantic.Field(
+ alias="resolution",
+ )
diff --git a/magic_hour/types/params/v1_body_swap_create_body_assets.py b/magic_hour/types/params/v1_body_swap_create_body_assets.py
new file mode 100644
index 0000000..9e19f15
--- /dev/null
+++ b/magic_hour/types/params/v1_body_swap_create_body_assets.py
@@ -0,0 +1,46 @@
+import pydantic
+import typing_extensions
+
+
+class V1BodySwapCreateBodyAssets(typing_extensions.TypedDict):
+ """
+ Person image and scene image for body swap
+ """
+
+ person_file_path: typing_extensions.Required[str]
+ """
+ Image of the person to place into the scene. This value is either
+ - a direct URL to the image file
+ - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls).
+
+ See the [file upload guide](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) for details.
+
+ """
+
+ scene_file_path: typing_extensions.Required[str]
+ """
+ Target scene image (background). This value is either
+ - a direct URL to the video file
+ - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls).
+
+ See the [file upload guide](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) for details.
+
+ """
+
+
+class _SerializerV1BodySwapCreateBodyAssets(pydantic.BaseModel):
+ """
+ Serializer for V1BodySwapCreateBodyAssets handling case conversions
+ and file omissions as dictated by the API
+ """
+
+ model_config = pydantic.ConfigDict(
+ populate_by_name=True,
+ )
+
+ person_file_path: str = pydantic.Field(
+ alias="person_file_path",
+ )
+ scene_file_path: str = pydantic.Field(
+ alias="scene_file_path",
+ )
diff --git a/magic_hour/types/params/v1_image_to_video_create_body.py b/magic_hour/types/params/v1_image_to_video_create_body.py
index 83fd92a..8f28017 100644
--- a/magic_hour/types/params/v1_image_to_video_create_body.py
+++ b/magic_hour/types/params/v1_image_to_video_create_body.py
@@ -30,7 +30,7 @@ class V1ImageToVideoCreateBody(typing_extensions.TypedDict):
* **`ltx-2`**: Automatically included with no extra credits
* **`wan-2.2`**: Not supported
* **`seedance`**: Not supported
- * **`seedance-2.0`**: Not supported
+ * **`seedance-2.0`**: Automatically included with no extra credits
* **`kling-2.5`**: Automatically included with no extra credits
* **`kling-3.0`**: Toggle-able (can enable/disable)
* **`sora-2`**: Automatically included with no extra credits
diff --git a/magic_hour/types/params/v1_text_to_video_create_body.py b/magic_hour/types/params/v1_text_to_video_create_body.py
index 1a99126..3048c17 100644
--- a/magic_hour/types/params/v1_text_to_video_create_body.py
+++ b/magic_hour/types/params/v1_text_to_video_create_body.py
@@ -41,7 +41,7 @@ class V1TextToVideoCreateBody(typing_extensions.TypedDict):
* **`ltx-2`**: Automatically included with no extra credits
* **`wan-2.2`**: Not supported
* **`seedance`**: Not supported
- * **`seedance-2.0`**: Not supported
+ * **`seedance-2.0`**: Automatically included with no extra credits
* **`kling-2.5`**: Automatically included with no extra credits
* **`kling-3.0`**: Toggle-able (can enable/disable)
* **`sora-2`**: Automatically included with no extra credits
diff --git a/pyproject.toml b/pyproject.toml
index 9c3c411..8071444 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "magic_hour"
-version = "0.61.0"
+version = "0.62.0"
description = "Python SDK for Magic Hour API"
readme = "README.md"
authors = []
diff --git a/tests/test_v1_body_swap_client.py b/tests/test_v1_body_swap_client.py
new file mode 100644
index 0000000..ac143c6
--- /dev/null
+++ b/tests/test_v1_body_swap_client.py
@@ -0,0 +1,79 @@
+import pydantic
+import pytest
+
+from magic_hour import AsyncClient, Client
+from magic_hour.environment import Environment
+from magic_hour.types import models
+
+
+def test_create_200_success_all_params() -> None:
+ """Tests a POST request to the /v1/body-swap endpoint.
+
+ Operation: create
+ Test Case ID: success_all_params
+ Expected Status: 200
+ Mode: Synchronous execution
+
+ Response : models.V1BodySwapCreateResponse
+
+ Validates:
+ - Authentication requirements are satisfied
+ - All required input parameters are properly handled
+ - Response status code is correct
+ - Response data matches expected schema
+
+ This test uses example data to verify the endpoint behavior.
+ """
+ # tests calling sync method with example data
+ client = Client(token="API_TOKEN", environment=Environment.MOCK_SERVER)
+ response = client.v1.body_swap.create(
+ assets={
+ "person_file_path": "api-assets/id/1234.png",
+ "scene_file_path": "api-assets/id/5678.png",
+ },
+ resolution="1k",
+ name="My Body Swap image",
+ )
+ try:
+ pydantic.TypeAdapter(models.V1BodySwapCreateResponse).validate_python(response)
+ is_valid_response_schema = True
+ except pydantic.ValidationError:
+ is_valid_response_schema = False
+ assert is_valid_response_schema, "failed response type check"
+
+
+@pytest.mark.asyncio
+async def test_await_create_200_success_all_params() -> None:
+ """Tests a POST request to the /v1/body-swap endpoint.
+
+ Operation: create
+ Test Case ID: success_all_params
+ Expected Status: 200
+ Mode: Asynchronous execution
+
+ Response : models.V1BodySwapCreateResponse
+
+ Validates:
+ - Authentication requirements are satisfied
+ - All required input parameters are properly handled
+ - Response status code is correct
+ - Response data matches expected schema
+
+ This test uses example data to verify the endpoint behavior.
+ """
+ # tests calling async method with example data
+ client = AsyncClient(token="API_TOKEN", environment=Environment.MOCK_SERVER)
+ response = await client.v1.body_swap.create(
+ assets={
+ "person_file_path": "api-assets/id/1234.png",
+ "scene_file_path": "api-assets/id/5678.png",
+ },
+ resolution="1k",
+ name="My Body Swap image",
+ )
+ try:
+ pydantic.TypeAdapter(models.V1BodySwapCreateResponse).validate_python(response)
+ is_valid_response_schema = True
+ except pydantic.ValidationError:
+ is_valid_response_schema = False
+ assert is_valid_response_schema, "failed response type check"