diff --git a/docusaurus.config.js b/docusaurus.config.js
index 352e69b00..3d9f5c6a2 100644
--- a/docusaurus.config.js
+++ b/docusaurus.config.js
@@ -272,6 +272,31 @@ export default async function createConfigAsync() {
],
],
plugins: [
+ [
+ '@docusaurus/plugin-client-redirects',
+ {
+ redirects: [
+ // Platform Cloud Integrations migration (Phase 3)
+ { from: '/platform-cloud/credentials/overview', to: '/platform-cloud/integrations/overview' },
+ { from: '/platform-cloud/credentials/data_repositories', to: '/platform-cloud/integrations/data-sources/overview' },
+ { from: '/platform-cloud/credentials/managed_identities', to: '/platform-cloud/integrations/identity/managed-identities' },
+ { from: '/platform-cloud/credentials/ssh_credentials', to: '/platform-cloud/integrations/identity/ssh-credentials' },
+ { from: '/platform-cloud/credentials/agent_credentials', to: '/platform-cloud/integrations/agent/credentials' },
+ { from: '/platform-cloud/credentials/container_registry_credentials', to: '/platform-cloud/integrations/container-registries/overview' },
+ { from: '/platform-cloud/credentials/aws_registry_credentials', to: '/platform-cloud/integrations/container-registries/aws' },
+ { from: '/platform-cloud/credentials/azure_registry_credentials', to: '/platform-cloud/integrations/container-registries/azure' },
+ { from: '/platform-cloud/credentials/docker_hub_registry_credentials', to: '/platform-cloud/integrations/container-registries/docker-hub' },
+ { from: '/platform-cloud/credentials/gitea_registry_credentials', to: '/platform-cloud/integrations/container-registries/gitea' },
+ { from: '/platform-cloud/credentials/github_registry_credentials', to: '/platform-cloud/integrations/container-registries/github' },
+ { from: '/platform-cloud/credentials/gitlab_registry_credentials', to: '/platform-cloud/integrations/container-registries/gitlab' },
+ { from: '/platform-cloud/credentials/google_registry_credentials', to: '/platform-cloud/integrations/container-registries/google' },
+ { from: '/platform-cloud/credentials/quay_registry_credentials', to: '/platform-cloud/integrations/container-registries/quay' },
+ { from: '/platform-cloud/git/overview', to: '/platform-cloud/integrations/git-providers/overview' },
+ { from: '/platform-cloud/enterprise/advanced-topics/manual-aws-batch-setup', to: '/platform-cloud/integrations/cloud-providers/aws/manual-setup' },
+ { from: '/platform-cloud/enterprise/advanced-topics/manual-azure-batch-setup', to: '/platform-cloud/integrations/cloud-providers/azure/manual-setup' },
+ ],
+ },
+ ],
process.env.EXCLUDE_PLATFORM_ENTERPRISE ? null : docs_platform_enterprise,
process.env.EXCLUDE_PLATFORM_CLOUD ? null : docs_platform_cloud,
process.env.EXCLUDE_PLATFORM_API ? null : docs_platform_api,
diff --git a/package-lock.json b/package-lock.json
index 3d621c722..be97e586b 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -9,6 +9,7 @@
"version": "0.0.0",
"dependencies": {
"@docusaurus/faster": "^3.9.2",
+ "@docusaurus/plugin-client-redirects": "^3.9.2",
"@rspack/core": "^1.4.11",
"@seqera/docusaurus-preset-seqera": "^1.0.37",
"image-size": "^2.0.2",
@@ -3708,6 +3709,30 @@
"react-dom": "*"
}
},
+ "node_modules/@docusaurus/plugin-client-redirects": {
+ "version": "3.9.2",
+ "resolved": "https://registry.npmjs.org/@docusaurus/plugin-client-redirects/-/plugin-client-redirects-3.9.2.tgz",
+ "integrity": "sha512-lUgMArI9vyOYMzLRBUILcg9vcPTCyyI2aiuXq/4npcMVqOr6GfmwtmBYWSbNMlIUM0147smm4WhpXD0KFboffw==",
+ "license": "MIT",
+ "dependencies": {
+ "@docusaurus/core": "3.9.2",
+ "@docusaurus/logger": "3.9.2",
+ "@docusaurus/utils": "3.9.2",
+ "@docusaurus/utils-common": "3.9.2",
+ "@docusaurus/utils-validation": "3.9.2",
+ "eta": "^2.2.0",
+ "fs-extra": "^11.1.1",
+ "lodash": "^4.17.21",
+ "tslib": "^2.6.0"
+ },
+ "engines": {
+ "node": ">=20.0"
+ },
+ "peerDependencies": {
+ "react": "^18.0.0 || ^19.0.0",
+ "react-dom": "^18.0.0 || ^19.0.0"
+ }
+ },
"node_modules/@docusaurus/plugin-content-blog": {
"version": "3.9.2",
"resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.9.2.tgz",
diff --git a/package.json b/package.json
index 9d3ff5359..2c981bdda 100644
--- a/package.json
+++ b/package.json
@@ -42,6 +42,7 @@
},
"dependencies": {
"@docusaurus/faster": "^3.9.2",
+ "@docusaurus/plugin-client-redirects": "^3.9.2",
"@rspack/core": "^1.4.11",
"@seqera/docusaurus-preset-seqera": "^1.0.37",
"image-size": "^2.0.2",
diff --git a/platform-cloud/cloud-sidebar.json b/platform-cloud/cloud-sidebar.json
index f4c21b576..46b855255 100644
--- a/platform-cloud/cloud-sidebar.json
+++ b/platform-cloud/cloud-sidebar.json
@@ -30,30 +30,97 @@
},
{
"type": "category",
- "label": "Credentials",
+ "label": "Integrations",
+ "link": {"type": "doc", "id": "integrations/overview"},
+ "collapsed": true,
"items": [
- "credentials/overview",
- "git/overview",
{
"type": "category",
- "label": "Container registry credentials",
- "link": {"type": "doc", "id": "credentials/container_registry_credentials"},
+ "label": "Cloud providers",
+ "link": {"type": "doc", "id": "integrations/cloud-providers/overview"},
+ "collapsed": true,
+ "items": [
+ {
+ "type": "category",
+ "label": "AWS",
+ "link": {"type": "doc", "id": "integrations/cloud-providers/aws/overview"},
+ "collapsed": true,
+ "items": [
+ "integrations/cloud-providers/aws/iam-policies",
+ "integrations/cloud-providers/aws/credentials",
+ "integrations/cloud-providers/aws/data-access",
+ "integrations/cloud-providers/aws/eks-additions",
+ "integrations/cloud-providers/aws/manual-setup"
+ ]
+ },
+ {
+ "type": "category",
+ "label": "Azure",
+ "link": {"type": "doc", "id": "integrations/cloud-providers/azure/overview"},
+ "collapsed": true,
+ "items": [
+ "integrations/cloud-providers/azure/roles-and-service-principals",
+ "integrations/cloud-providers/azure/credentials",
+ "integrations/cloud-providers/azure/storage-access",
+ "integrations/cloud-providers/azure/manual-setup"
+ ]
+ },
+ {
+ "type": "category",
+ "label": "Google Cloud",
+ "link": {"type": "doc", "id": "integrations/cloud-providers/google/overview"},
+ "collapsed": true,
+ "items": [
+ "integrations/cloud-providers/google/iam",
+ "integrations/cloud-providers/google/credentials",
+ "integrations/cloud-providers/google/storage-access"
+ ]
+ }
+ ]
+ },
+ {
+ "type": "category",
+ "label": "Git providers",
+ "link": {"type": "doc", "id": "integrations/git-providers/overview"},
+ "collapsed": true,
+ "items": [
+ "integrations/git-providers/github",
+ "integrations/git-providers/gitlab",
+ "integrations/git-providers/bitbucket",
+ "integrations/git-providers/gitea",
+ "integrations/git-providers/azure-devops",
+ "integrations/git-providers/codecommit",
+ "integrations/git-providers/multiple-credentials",
+ "integrations/git-providers/seqera-ai"
+ ]
+ },
+ {
+ "type": "category",
+ "label": "Container registries",
+ "link": {"type": "doc", "id": "integrations/container-registries/overview"},
+ "collapsed": true,
+ "items": [
+ "integrations/container-registries/aws",
+ "integrations/container-registries/azure",
+ "integrations/container-registries/docker-hub",
+ "integrations/container-registries/gitea",
+ "integrations/container-registries/github",
+ "integrations/container-registries/gitlab",
+ "integrations/container-registries/google",
+ "integrations/container-registries/quay"
+ ]
+ },
+ "integrations/data-sources/overview",
+ {
+ "type": "category",
+ "label": "Identity & access",
"collapsed": true,
"items": [
- "credentials/aws_registry_credentials",
- "credentials/azure_registry_credentials",
- "credentials/docker_hub_registry_credentials",
- "credentials/gitea_registry_credentials",
- "credentials/github_registry_credentials",
- "credentials/gitlab_registry_credentials",
- "credentials/google_registry_credentials",
- "credentials/quay_registry_credentials"
+ "integrations/identity/managed-identities",
+ "integrations/identity/ssh-credentials"
]
},
- "credentials/data_repositories",
- "credentials/managed_identities",
- "credentials/ssh_credentials",
- "credentials/agent_credentials"
+ "integrations/agent/credentials"
]
},
{
@@ -82,15 +149,7 @@
"resource-labels/overview",
"supported_software/fusion/overview",
"supported_software/agent/overview",
- "supported_software/dragen/overview",
- {
- "type": "category",
- "label": "Advanced options",
- "items":[
- "enterprise/advanced-topics/manual-aws-batch-setup",
- "enterprise/advanced-topics/manual-azure-batch-setup"
- ]
- }
+ "supported_software/dragen/overview"
]
},
{
diff --git a/platform-cloud/docs/compute-envs/aws-batch.md b/platform-cloud/docs/compute-envs/aws-batch.md
index ae487cd2f..dafa21506 100644
--- a/platform-cloud/docs/compute-envs/aws-batch.md
+++ b/platform-cloud/docs/compute-envs/aws-batch.md
@@ -19,704 +19,24 @@ There are two ways to create a Seqera Platform compute environment for AWS Batch
Both options require specific IAM permissions to function correctly, as well as access to an S3 bucket or EFS/FSx file system to store intermediate Nextflow files.
-## S3 bucket creation
+## Before you start
-AWS S3 (Simple Storage Service) is a type of **object storage**. To access input and output files using Seqera products like [Studios](../studios/overview) and [Data Explorer](../data/data-explorer) create one or more **S3 buckets**. An S3 bucket can also be used to store intermediate results of your Nextflow pipelines, as an alternative to using EFS or FSx file systems.
-:::note
-Using EFS or FSx as work directory is incompatible with Studios.
-:::
-
-1. Navigate to the [AWS S3 console](https://console.aws.amazon.com/s3/home).
-1. In the top right of the page, select the same region where you plan to create your AWS Batch compute environment.
-1. Select **Create bucket**.
-1. Enter a unique name for your bucket.
-1. Leave the rest of the options as default and select **Create bucket**.
-
-:::note
-S3 can be used by Nextflow for the storage of intermediate files. In production pipelines, this can amount to a lot of data. To reduce costs, consider using a retention policy when creating a bucket, such as automatically deleting intermediate files after 30 days. See the [AWS documentation](https://aws.amazon.com/premiumsupport/knowledge-center/s3-empty-bucket-lifecycle-rule/) for more information.
-:::
-
-## EFS or FSx file system creation
-
-[AWS Elastic File System (EFS)](https://aws.amazon.com/efs/) and [AWS FSx for Lustre](https://aws.amazon.com/fsx/lustre/) are types of **file storage** that can be used as a Nextflow work directory to store intermediate files, as an alternative to using S3 buckets.
-
-:::note
-Using EFS or FSx as work directory is incompatible with Studios.
-:::
-
-To use EFS or FSx as your Nextflow work directory, create an EFS or FSx file system in the same region where you plan to create your AWS Batch compute environment.
-
-The creation of an EFS or FSx file system can be done automatically by Seqera when creating the AWS Batch compute environment, or manually by following the steps below. If you let Seqera create the file system automatically, it will also be deleted when the compute environment is removed from Platform, unless the "Dispose Resources" option is disabled in the advanced options.
-
-### Creating an EFS file system
-
-To create a new EFS file system manually, visit the [EFS console](https://console.aws.amazon.com/efs/home).
-
-1. Select **Create file system**.
-1. Optionally give it a name, then select the VPC where your AWS Batch compute environment will be created.
-1. Leave the rest of the options as default and select **Create file system**.
-
-### Creating an FSx file system
-
-To create a new FSx for Lustre file system manually, visit the [FSx console](https://console.aws.amazon.com/fsx/home).
-
-1. Select **Create file system**.
-1. Select FSx for Lustre
-1. Follow the prompts to configure the file system according to your requirements, then select **Next**.
-1. Review the configuration and select **Create file system**.
-
-Make sure the [Lustre client](https://docs.aws.amazon.com/fsx/latest/LustreGuide/install-lustre-client.html) is available in the AMIs used by your AWS Batch compute environment to allow mounting FSx file systems.
-
-## Required Platform IAM permissions
-
-To create and launch pipelines, explore buckets with Data Explorer or run Studio sessions with the AWS Batch compute environment, an IAM user with specific permissions must be provided. Some permissions are mandatory for the compute environment to be created and function correctly, while others are optional and used for example to provide list of values to pick from in the Platform UI.
-
-Permissions can be attached directly to an [IAM user](#iam-user-creation), or to an [IAM role](#iam-role-creation-optional) that the IAM user can assume when accessing AWS resources.
-
-A permissive and broad policy with all the required permissions is provided here for a quick start. However, we recommend following the principle of least privilege and only granting the necessary permissions for your use case, as shown in the following sections.
-
-
-Full permissive policy (for reference)
-
-```json
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Sid": "BatchEnvironmentManagementCanBeRestricted",
- "Effect": "Allow",
- "Action": [
- "batch:CreateComputeEnvironment",
- "batch:CreateJobQueue",
- "batch:DeleteComputeEnvironment",
- "batch:DeleteJobQueue",
- "batch:UpdateComputeEnvironment",
- "batch:UpdateJobQueue"
- ],
- "Resource": [
- "arn:aws:batch:*:*:compute-environment/TowerForge-*",
- "arn:aws:batch:*:*:job-queue/TowerForge-*"
- ]
- },
- {
- "Sid": "BatchEnvironmentListing",
- "Effect": "Allow",
- "Action": [
- "batch:DescribeComputeEnvironments",
- "batch:DescribeJobDefinitions",
- "batch:DescribeJobQueues",
- "batch:DescribeJobs"
- ],
- "Resource": "*"
- },
- {
- "Sid": "BatchJobsManagementCanBeRestricted",
- "Effect": "Allow",
- "Action": [
- "batch:CancelJob",
- "batch:RegisterJobDefinition",
- "batch:SubmitJob",
- "batch:TagResource",
- "batch:TerminateJob"
- ],
- "Resource": [
- "arn:aws:batch:*:*:job-definition/*",
- "arn:aws:batch:*:*:job-queue/TowerForge-*",
- "arn:aws:batch:*:*:job/*"
- ]
- },
- {
- "Sid": "LaunchTemplateManagement",
- "Effect": "Allow",
- "Action": [
- "ec2:CreateLaunchTemplate",
- "ec2:DeleteLaunchTemplate",
- "ec2:DescribeLaunchTemplates",
- "ec2:DescribeLaunchTemplateVersions"
- ],
- "Resource": "*"
- },
- {
- "Sid": "PassRolesToBatchCanBeRestricted",
- "Effect": "Allow",
- "Action": "iam:PassRole",
- "Resource": "*",
- "Condition": {
- "StringEquals": {
- "iam:PassedToService": [
- "batch.amazonaws.com",
- "ec2.amazonaws.com"
- ]
- }
- }
- },
- {
- "Sid": "CloudWatchLogsAccessCanBeRestricted",
- "Effect": "Allow",
- "Action": [
- "logs:Describe*",
- "logs:FilterLogEvents",
- "logs:Get*",
- "logs:List*",
- "logs:StartQuery",
- "logs:StopQuery",
- "logs:TestMetricFilter"
- ],
- "Resource": "*"
- },
- {
- "Sid": "OptionalS3PlatformDataAccessCanBeRestricted",
- "Effect": "Allow",
- "Action": [
- "s3:Get*",
- "s3:List*",
- "s3:PutObject"
- ],
- "Resource": "*"
- },
- {
- "Sid": "OptionalIAMManagementCanBeRestricted",
- "Effect": "Allow",
- "Action": [
- "iam:AddRoleToInstanceProfile",
- "iam:AttachRolePolicy",
- "iam:CreateInstanceProfile",
- "iam:CreateRole",
- "iam:DeleteInstanceProfile",
- "iam:DeleteRole",
- "iam:DeleteRolePolicy",
- "iam:DetachRolePolicy",
- "iam:GetRole",
- "iam:ListAttachedRolePolicies",
- "iam:ListRolePolicies",
- "iam:PutRolePolicy",
- "iam:RemoveRoleFromInstanceProfile",
- "iam:TagInstanceProfile",
- "iam:TagRole"
- ],
- "Resource": [
- "arn:aws:iam::*:role/TowerForge-*",
- "arn:aws:iam::*:instance-profile/TowerForge-*"
- ]
- },
- {
- "Sid": "OptionalFetchOptimizedAMIMetadata",
- "Effect": "Allow",
- "Action": "ssm:GetParameters",
- "Resource": "arn:aws:ssm:*:*:parameter/aws/service/ecs/*"
- },
- {
- "Sid": "OptionalEC2MetadataDescribe",
- "Effect": "Allow",
- "Action": [
- "ec2:DescribeAccountAttributes",
- "ec2:DescribeImages",
- "ec2:DescribeInstanceTypeOfferings",
- "ec2:DescribeInstanceTypes",
- "ec2:DescribeKeyPairs",
- "ec2:DescribeSecurityGroups",
- "ec2:DescribeSubnets",
- "ec2:DescribeVpcs"
- ],
- "Resource": "*"
- },
- {
- "Sid": "OptionalFSXManagementCanBeRestricted",
- "Effect": "Allow",
- "Action": [
- "fsx:CreateFileSystem",
- "fsx:DeleteFileSystem",
- "fsx:DescribeFileSystems",
- "fsx:TagResource"
- ],
- "Resource": "*"
- },
- {
- "Sid": "OptionalEFSManagementCanBeRestricted",
- "Effect": "Allow",
- "Action": [
- "elasticfilesystem:CreateFileSystem",
- "elasticfilesystem:DeleteFileSystem",
- "elasticfilesystem:CreateMountTarget",
- "elasticfilesystem:DeleteMountTarget",
- "elasticfilesystem:DescribeFileSystems",
- "elasticfilesystem:DescribeMountTargets",
- "elasticfilesystem:UpdateFileSystem",
- "elasticfilesystem:PutLifecycleConfiguration",
- "elasticfilesystem:TagResource"
- ],
- "Resource": "*"
- },
- {
- "Sid": "OptionalPipelineSecretsListing",
- "Effect": "Allow",
- "Action": "secretsmanager:ListSecrets",
- "Resource": "*"
- },
- {
- "Sid": "OptionalPipelineSecretsManagementCanBeRestricted",
- "Effect": "Allow",
- "Action": [
- "secretsmanager:DescribeSecret",
- "secretsmanager:DeleteSecret",
- "secretsmanager:CreateSecret"
- ],
- "Resource": "arn:aws:secretsmanager:*:*:secret:tower-*"
- }
- ]
-}
-```
-
-
-
-### AWS Batch management
-
-The first section of the policy allows Seqera to create, update and delete Batch compute environments ("CE"), job queues ("JQ") and jobs.
-
-If you are required to use manually created CEs and JQs or prefer to manage their lifecycle yourself, you can remove the permissions to manipulate CEs and JQs from the policy. The minimum permissions required are:
-
-- `batch:DescribeJobs` to report job status
-- `batch:DescribeJobDefinitions` to list existing job definitions
-- `batch:RegisterJobDefinition` to create new job definitions
-- `batch:CancelJob` to cancel jobs
-- `batch:SubmitJob` to submit jobs
-- `batch:TagResource` to tag jobs
-- `batch:TerminateJob` to terminate jobs
-
-You can use `batch:DescribeJobQueues` to list the existing job queues in a drop-down menu but it's not required if you're specifying manually created job queues.
-However, it is required when you let Seqera create and manage job queues automatically (using the Forge tool). In this case, the `batch:DescribeComputeEnvironments` permission must also be added.
-
-You can also restrict permissions based on resource tags. These are defined by users when they [set up a pipeline in Platform](https://docs.seqera.io/platform-enterprise/resource-labels/overview).
-
-```json
-{
- "Sid": "BatchEnvironmentListing",
- "Effect": "Allow",
- "Action": [
- "batch:DescribeJobDefinitions",
- "batch:DescribeJobs"
- ],
- "Resource": "*"
-},
-{
- "Sid": "BatchJobsManagement",
- "Effect": "Allow",
- "Action": [
- "batch:CancelJob",
- "batch:RegisterJobDefinition",
- "batch:SubmitJob",
- "batch:TagResource",
- "batch:TerminateJob"
- ],
- "Resource": [
- "arn:aws:batch:::job-queue/MyCustomJQ",
- "arn:aws:batch:::job-definition/*",
- "arn:aws:batch:::job/*"
- ],
- "Condition": {
- "StringEqualsIfExists": {
- "aws:ResourceTag/MyCustomTag": "MyCustomValue"
- }
- }
-}
-```
-
-:::warning
-Restricting the `batch` actions using resource tags requires that you set the appropriate tags on each Seqera pipeline when configuring it in Platform. Forgetting to set the tag will cause the pipeline to fail to run.
-:::
-
-The job definition and job name resources cannot be restricted to specific names, as Seqera creates job definitions and jobs with dynamic names. Therefore, the wildcard `*` must be used in the name of these resources. In addition, `batch:SubmitJob` requires permission on both job definitions and job queues, so make sure to include both ARNs in the `Resource` array.
-
-If you prefer to let Seqera manage Batch resources for you, you can still restrict the permissions to specific resources in your account ID and region; you can also restrict permissions based on Resource tag, as shown with the `Condition`s in the example above.
-
-:::note
-The quick start policy is expecting CE and JQ names automatically created by Seqera to start with the `TowerForge-` prefix, which is the default prefix used by Platform Cloud resources and can't be customized.
-:::
-
-### Launch template management
-
-Seqera requires the ability to create and manage EC2 launch templates using optimized AMIs identified via AWS Systems Manager (SSM).
-
-:::note
-AWS does not support restricting IAM permissions on EC2 launch templates based on specific resource names or tags. As a result, permission to operate on any resource `*` must be granted.
-:::
-
-### Pass role to Batch
-
-The `iam:PassRole` permission allows Seqera to pass [execution IAM roles](https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html#create-execution-role) to AWS Batch to run Nextflow pipelines.
-
-Permissions can be restricted to only allow passing the manually created roles or the roles created by Seqera automatically with the default prefix `TowerForge-` to the AWS Batch and EC2 services, in a specific account:
+Set up the [AWS integration](/platform-cloud/integrations/cloud-providers/aws/overview) before creating an AWS Batch compute environment in Seqera.
-```json
-{
- "Sid": "PassRolesToBatch",
- "Effect": "Allow",
- "Action": "iam:PassRole",
- "Resource": "arn:aws:iam:::role/TowerForge-*",
- "Condition": {
- "StringEquals": {
- "iam:PassedToService": [
- "batch.amazonaws.com",
- "ec2.amazonaws.com"
- ]
- }
- }
-}
-```
-
-### CloudWatch logs access
-
-Seqera requires access to CloudWatch logs to display relevant log data in the web interface.
-
-The policy can be scoped down to limit access to the [specific log group](#advanced-options) defined on the compute environment in a specific account and region:
-
-```json
-{
- "Sid": "CloudWatchLogsAccess",
- "Effect": "Allow",
- "Action": [
- "logs:Describe*",
- "logs:FilterLogEvents",
- "logs:Get*",
- "logs:List*",
- "logs:StartQuery",
- "logs:StopQuery",
- "logs:TestMetricFilter"
- ],
- "Resource": "arn:aws:logs:::log-group:/aws/batch/job/*"
-}
-```
-
-### S3 access (optional)
-
-Seqera automatically attempts to fetch a list of S3 buckets available in the AWS account connected to Platform, to provide them in a drop-down menu to be used as Nextflow working directory, and make the compute environment creation smoother. This feature is optional, and users can type the bucket name manually when setting up a compute environment. To allow Seqera to fetch the list of buckets in the account, the `s3:ListAllMyBuckets` action can be added, and it must have the `Resource` field set to `*`, as shown in the generic policy at the beginning of this document.
-
-Seqera offers several products to manipulate data on AWS S3 buckets, such as [Studios](../studios/overview) and [Data Explorer](../data/data-explorer); if these features are not used the related permissions can be omitted.
-
-The IAM policy can be scoped down to only allow limited read/write permissions in certain S3 buckets used by Studios/Data Explorer. In addition, the policy must include permission to check the region and list the content of the S3 bucket used as Nextflow work directory. We also recommend granting the `s3:GetObject` permission on the work directory path to fetch Nextflow log files.
-
-:::note
-If you opted to create a separate S3 bucket only for Nextflow work directories, there is no need for the IAM user to have read/write access to it. If Seqera is allowed to manage resources (using Batch Forge) the IAM roles automatically created will have the necessary permissions.
-
-If you set up the compute environment manually, you can create the required IAM roles with the necessary permissions as detailed in the [manual AWS Batch setup documentation](../enterprise/advanced-topics/manual-aws-batch-setup).
-:::
-
-```json
-{
- "Sid": "S3CheckBucketWorkDirectory",
- "Effect": "Allow",
- "Action": [
- "s3:GetBucketLocation",
- "s3:ListBucket"
- ],
- "Resource": [
- "arn:aws:s3:::example-bucket-used-as-work-directory"
- ]
-},
-{
- "Sid": "S3ReadOnlyNextflowLogFiles",
- "Effect": "Allow",
- "Action": [
- "s3:GetObject"
- ],
- "Resource": [
- "arn:aws:s3:::example-bucket-used-as-work-directory/path/to/work/directory/*"
- ]
-},
-{
- "Sid": "S3ReadWriteBucketsForStudiosDataExplorer",
- "Effect": "Allow",
- "Action": [
- "s3:Get*",
- "s3:List*",
- "s3:PutObject"
- ],
- "Resource": [
- "arn:aws:s3:::example-bucket-read-write-studios",
- "arn:aws:s3:::example-bucket-read-write-studios/*",
- "arn:aws:s3:::example-bucket-read-write-data-explorer",
- "arn:aws:s3:::example-bucket-read-write-data-explorer/*"
- ]
-}
-```
-
-### IAM roles for AWS Batch (optional)
-
-Seqera can automatically create the IAM roles needed to interact with AWS Batch and other AWS services. You can opt out of this behavior by creating the required IAM roles manually and providing their ARNs during compute environment creation in Platform: refer to the [documentation](../enterprise/advanced-topics/manual-aws-batch-setup) for more details on how to manually set up IAM roles.
-
-To allow Seqera to create IAM roles but restrict it to your specific account and the default IAM role prefix, use the following statement:
-
-```json
-{
- "Sid": "IAMRoleAndProfileManagement",
- "Effect": "Allow",
- "Action": [
- "iam:AddRoleToInstanceProfile",
- "iam:AttachRolePolicy",
- "iam:CreateInstanceProfile",
- "iam:CreateRole",
- "iam:DeleteInstanceProfile",
- "iam:DeleteRole",
- "iam:DeleteRolePolicy",
- "iam:DetachRolePolicy",
- "iam:GetRole",
- "iam:ListAttachedRolePolicies",
- "iam:ListRolePolicies",
- "iam:PutRolePolicy",
- "iam:RemoveRoleFromInstanceProfile",
- "iam:TagInstanceProfile",
- "iam:TagRole"
- ],
- "Resource": [
- "arn:aws:iam:::role/TowerForge-*"
- "arn:aws:iam:::instance-profile/TowerForge-*"
- ]
-}
-```
-
-:::note
-The quick start policy is expecting role names automatically created by Seqera to start with the `TowerForge-` prefix, which is the default prefix used by Platform Cloud resources and can't be customized.
-:::
-
-### AWS Systems Manager (optional)
-
-Seqera Platform can interact with AWS Systems Manager (SSM) to [identify ECS Optimized AMIs](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/retrieve-ecs-optimized_AMI.html) for pipeline execution. This permission is optional, meaning that a [custom AMI ID](#advanced-options) can be provided at compute environment creation, removing the need for this permission.
-
-### EC2 describe permissions (optional)
-
-Seqera can interact with EC2 to retrieve information about existing AWS resources in your account, including VPCs, subnets, and security groups. This data is used to populate dropdown menus in the Platform UI when creating new compute environments. While these permissions are optional, they are recommended to enhance the user experience. Without these permissions, resource ARNs need to be manually entered in the interface by the user.
-
-:::note
-AWS does not support restricting IAM permissions on EC2 Describe actions based on specific resource names or tags. As a result, permission to operate on any resource `*` must be granted.
-:::
-
-### FSx file systems (optional)
-
-Seqera can manage [AWS FSx file systems](https://aws.amazon.com/fsx/), if needed by the pipelines.
-
-This section of the policy is optional and can be omitted if FSx file systems are not used by your pipelines. The describe actions cannot be restricted to specific resources, so permission to operate on any resource `*` must be granted. The management actions can be restricted to specific resources, like in the example below.
-
-```json
-{
- "Sid": "FSxDescribe",
- "Effect": "Allow",
- "Action": [
- "fsx:DescribeFileSystems"
- ],
- "Resource": "*"
-},
-{
- "Sid": "FSxManagement",
- "Effect": "Allow",
- "Action": [
- "fsx:CreateFileSystem",
- "fsx:DeleteFileSystem",
- "fsx:TagResource"
- ],
- "Resource": "arn:aws:fsx:::file-system/MyManualFSx"
-}
-```
-
-### EFS file systems (optional)
-
-Seqera can manage [AWS EFS file systems](https://aws.amazon.com/efs/), if needed by the pipelines.
-
-This section of the policy is optional and can be omitted if EFS file systems are not used by your pipelines. The describe actions cannot be restricted to specific resources, so permission to operate on any resource `*` must be granted. The management actions can be restricted to specific resources, like in the example below.
-
-```json
-{
- "Sid": "EFSDescribe",
- "Effect": "Allow",
- "Action": [
- "elasticfilesystem:DescribeFileSystems",
- "elasticfilesystem:DescribeMountTargets"
- ],
- "Resource": "*"
-},
-{
- "Sid": "EFSManagement",
- "Effect": "Allow",
- "Action": [
- "elasticfilesystem:CreateFileSystem",
- "elasticfilesystem:DeleteFileSystem",
- "elasticfilesystem:CreateMountTarget",
- "elasticfilesystem:DeleteMountTarget",
- "elasticfilesystem:UpdateFileSystem",
- "elasticfilesystem:PutLifecycleConfiguration",
- "elasticfilesystem:TagResource"
- ],
- "Resource": "arn:aws:elasticfilesystem:::file-system/MyManualEFS"
-}
-```
-
-### Pipeline secrets (optional)
-
-Seqera can synchronize [pipeline secrets](../secrets/overview) defined on the Platform workspace with AWS Secrets Manager, which requires additional permissions on the IAM user. If you do not plan to use pipeline secrets, you can omit this section of the policy.
-
-The listing of secrets cannot be restricted, but the management actions can be restricted to only allow managing secrets in a specific account and region, which must be the same region where the pipeline runs. Note that Seqera only creates secrets with the `tower-` prefix.
-
-```json
-{
- "Sid": "PipelineSecretsListing",
- "Effect": "Allow",
- "Action": "secretsmanager:ListSecrets",
- "Resource": "*"
-},
-{
- "Sid": "PipelineSecretsManagementCanBeRestricted",
- "Effect": "Allow",
- "Action": [
- "secretsmanager:DescribeSecret",
- "secretsmanager:DeleteSecret",
- "secretsmanager:CreateSecret"
- ],
- "Resource": "arn:aws:secretsmanager:::secret:tower-*"
-}
-```
-
-#### Additional steps required to use secrets in a pipeline
-
-To successfully use pipeline secrets, the IAM roles manually created must follow the steps detailed in the [documentation](../secrets/overview#aws-secrets-manager-integration).
-
-## Create the IAM policy
-
-The policy above must be created in the AWS account where the AWS Batch resources need to be created.
-
-1. Open the [AWS IAM console](https://console.aws.amazon.com/iam) in the account where you want to create the AWS Batch resources.
-1. From the left navigation menu, select **Policies** under **Access management**.
-1. Select **Create policy**.
-1. On the **Policy editor** section, select the **JSON** tab.
-1. Following the instructions detailed in the [IAM permissions breakdown section](#required-platform-iam-permissions) replace the default text in the policy editor area under the **JSON** tab with a policy adapted to your use case, then select **Next**.
-1. Enter a name and description for the policy on the **Review and create** page, then select **Create policy**.
-
-## IAM user creation
-
-Seqera requires an Identity and Access Management (IAM) User to create and manage AWS Batch resources in your AWS account. We recommend creating a separate IAM policy rather an IAM User inline policy, as the latter only allows 2048 characters, which may not be sufficient for all the required permissions.
-
-In certain scenarios, for example when multiple users need to access the same AWS account and provision AWS Batch resources, an IAM role with the required permissions can be created instead, and the IAM user can assume that role when accessing AWS resources, as detailed in the [IAM role creation (optional)](#iam-role-creation-optional) section.
-
-Depending whether you choose to let Seqera automatically create the required AWS Batch resources in your account, or prefer to set them up manually, the IAM user must have specific permissions as detailed in the [Required Platform IAM permissions](#required-platform-iam-permissions) section. Alternatively, you can create an IAM role with the required permissions and allow the IAM user to assume that role when accessing AWS resources, as detailed in the [IAM role creation (optional)](#iam-role-creation-optional) section.
-
-### Create an IAM user
-
-1. From the [AWS IAM console](https://console.aws.amazon.com/iam), select **Users** in the left navigation menu, then select **Create User** at the top right of the page.
-1. Enter a name for your user (e.g., _seqera_) and select **Next**.
-1. Under **Permission options**, select **Attach policies directly**, then search for and select the policy created above, and select **Next**.
- * If you prefer to make the IAM user assume a role to manage AWS resources (see the [IAM role creation (optional)](#iam-role-creation-optional) section), create a policy with the following content (edit the AWS principal with the ARN of the role created) and attach it to the IAM user:
-
- ```json
- {
- "Sid": "AssumeRoleToManageBatchResources",
- "Effect": "Allow",
- "Action": "sts:AssumeRole",
- "Resource": "arn:aws:iam:::role/",
- "Condition": {
- "StringEquals": {
- "sts:ExternalId": ""
- }
- }
- }
- ```
-1. On the last page, review the user details and select **Create user**.
-
-The user has now been created. The most up-to-date instructions for creating an IAM user can be found in the [AWS documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html).
-
-### Obtain IAM user credentials
-
-To get the credentials needed to connect Seqera to your AWS account, follow these steps:
-
-1. From the [AWS IAM console](https://console.aws.amazon.com/iam), select **Users** in the left navigation menu, then select the newly created user from the users table.
-1. Select the **Security credentials** tab, then select **Create access key** under the **Access keys** section.
-1. In the **Use case** dialog that appears, select **Command line interface (CLI)**, then tick the confirmation checkbox at the bottom to acknowledge that you want to proceed creating an access key, and select **Next**.
-1. Optionally provide a description for the access key, like the reason for creating it, then select **Create access key**.
-1. Save the **Access key** and **Secret access key** in a secure location as you will need to provide them when creating credentials in Seqera.
-
-## IAM role creation (optional)
-
-Rather than attaching permissions directly to the IAM user, you can create an IAM role with the required permissions and allow the IAM user to assume that role when accessing AWS resources. This is useful when multiple IAM users are used to access the same AWS account: this way the actual permissions to operate on the resources are only granted to a single centralized role.
-
-1. From the [AWS IAM console](https://console.aws.amazon.com/iam), select **Roles** in the left navigation menu, then select **Create role** at the top right of the page.
-1. Select **Custom trust policy** as the type of trusted entity, provide the following policy and edit the AWS principal with the ARN of the IAM user created in the [IAM user creation](#iam-user-creation) section, then select **Next**.
- ```json
- {
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Principal": {
- "AWS": [
- "arn:aws:iam:::user/"
- ]
- },
- "Action": "sts:AssumeRole",
- "Condition": {
- "StringEquals": {
- "sts:ExternalId": ""
- }
- }
- },
- {
- "Effect": "Allow",
- "Principal": {
- "AWS": [
- "arn:aws:iam:::user/"
- ]
- },
- "Action": "sts:TagSession"
- }
- ]
- }
- ```
-1. On the **Permissions** page, search for and select the policy created in the [IAM user creation](#iam-user-creation) section, then select **Next**.
-1. Give the role a name and optionally a description, review the details of the role, optionally provide tags to help you identify the role, then select **Create role**.
-
-Multiple users can be specified in the trust policy by adding more ARNs to the `Principal` section.
-
-:::note
-Seqera Platform generates the `External ID` value during AWS credential creation. For role-based credentials, use this exact value in your IAM trust policy (`sts:ExternalId`).
-:::
+- [AWS IAM policies](/platform-cloud/integrations/cloud-providers/aws/iam-policies) — required permissions (select the **AWS Batch** tab).
+- [AWS credentials](/platform-cloud/integrations/cloud-providers/aws/credentials) — IAM policy, IAM user, IAM role, and how to add credentials in Seqera.
+- [AWS data access](/platform-cloud/integrations/cloud-providers/aws/data-access) — S3 bucket, EFS, and FSx prerequisites.
+- [AWS Batch manual setup](/platform-cloud/integrations/cloud-providers/aws/manual-setup) — alternative to Batch Forge.
-### Role-based trust policy example (Seqera Cloud)
-
-For role-based AWS credentials in Seqera Cloud, allow the Seqera Cloud access role `arn:aws:iam::161471496260:role/SeqeraPlatformCloudAccessRole` in your trust policy and enforce the `External ID` generated during credential creation:
-
-```json
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Principal": {
- "AWS": "arn:aws:iam::161471496260:role/SeqeraPlatformCloudAccessRole"
- },
- "Action": "sts:AssumeRole",
- "Condition": {
- "StringEquals": {
- "sts:ExternalId": ""
- }
- }
- },
- {
- "Effect": "Allow",
- "Principal": {
- "AWS": "arn:aws:iam::161471496260:role/SeqeraPlatformCloudAccessRole"
- },
- "Action": "sts:TagSession"
- }
- ]
-}
-```
-
-## AWS credential options
-
-AWS credentials can be configured in two ways:
-
-- **Key-based credentials**: Access key and secret key with direct IAM permissions. If you provide a role ARN in **Assume role**, the **Generate External ID** switch is displayed and External ID generation is optional.
-- **Role-based credentials (recommended)**: Use role assumption only (no static keys). Paste the IAM role ARN which Seqera must use for accessing your AWS resources in **Assume role**. External ID is generated automatically when you save.
-
-Use the IAM role ARN which Seqera must use for accessing your AWS resources in **Assume role**. This field is available for both key-based and role-based credentials. It is optional for key-based credentials and required for role-based credentials.
-
-Existing credentials created before March 2026 continue to work without changes.
+{/* Anchor stubs preserved for backwards compatibility with deep links from older content. */}
+
+
+
+
+
+
+
+
## Automatic configuration of Batch resources
@@ -741,12 +61,12 @@ Depending on the provided configuration in the UI, Seqera might also create IAM
1. Select **AWS Batch** as the target platform.
1. From the **Credentials** drop-down, select existing AWS credentials, or select **+** to add new credentials. If you're using existing credentials, skip to step 9.
:::note
- You can create multiple credentials in your Seqera environment. See [Credentials](../credentials/overview).
+ You can create multiple credentials in your Seqera environment. See [Credentials](/platform-cloud/integrations/overview).
:::
1. Enter a name, e.g., _AWS Credentials_.
1. Under **AWS credential mode**, select **Keys** or **Role**.
1. For **Keys** mode:
- - Add the **Access key** and **Secret key** you [previously obtained](#obtain-iam-user-credentials).
+ - Add the **Access key** and **Secret key** [obtained from the AWS IAM console](/platform-cloud/integrations/cloud-providers/aws/credentials#obtain-iam-user-credentials).
- Optionally paste the IAM role ARN which Seqera must use for accessing your AWS resources in **Assume role**.
- If you paste a role ARN in **Assume role**, the **Generate External ID** switch is displayed. Generating an External ID is optional in **Keys** mode.
- If **Generate External ID** is selected, an External ID is automatically generated and shown after you save the credential.
@@ -757,7 +77,7 @@ Depending on the provided configuration in the UI, Seqera might also create IAM
When using AWS keys without an assumed role, the associated AWS user must have been granted permissions to operate on the cloud resources directly. When an assumed role is provided, the IAM user keys are only used to retrieve temporary credentials impersonating the role specified: this could be useful when e.g. multiple IAM users are used to access the same AWS account, and the actual permissions to operate on the resources are only granted to the role.
:::
1. Select a **Region**, e.g., _eu-west-1 - Europe (Ireland)_. This region must match the location of the S3 bucket or EFS/FSx file system you plan to use as work directory.
-1. In the **Pipeline work directory** field type or select from the dropdown menu the S3 bucket [previously created](#s3-bucket-creation), e.g., `s3://seqera-bucket`. The work directory can be customized to specify a folder inside the bucket where Nextflow intermediate files will be stored, e.g., `s3://seqera-bucket/nextflow-workdir`. The bucket must be located in the same region chosen in the previous step.
+1. In the **Pipeline work directory** field type or select from the dropdown menu the [S3 bucket you created](/platform-cloud/integrations/cloud-providers/aws/data-access#s3-bucket-creation), e.g., `s3://seqera-bucket`. The work directory can be customized to specify a folder inside the bucket where Nextflow intermediate files will be stored, e.g., `s3://seqera-bucket/nextflow-workdir`. The bucket must be located in the same region chosen in the previous step.
:::note
When you specify an S3 bucket as your work directory, this bucket is used for the Nextflow [cloud cache](https://docs.seqera.io/nextflow/cache-and-resume#cache-stores) by default. Seqera adds a `cloudcache` block to the Nextflow configuration file for all runs executed with this compute environment. This block includes the path to a `cloudcache` folder in your work directory, e.g., `s3://seqera-bucket/cloudcache/.cache`. You can specify an alternative cache location with the **Nextflow config file** field on the pipeline [launch](../launch/launchpad#launch-form) form.
@@ -961,11 +281,11 @@ Seqera Platform compute environments for AWS Batch include advanced options to c
## Manual configuration of Batch resources
-This section is for users with a pre-configured AWS environment: follow the [AWS Batch queue and compute environment creation instructions](../enterprise/advanced-topics/manual-aws-batch-setup.mdx) to set up the required AWS Batch resources in your account.
+This section is for users with a pre-configured AWS environment: follow the [AWS Batch manual setup instructions](/platform-cloud/integrations/cloud-providers/aws/manual-setup) to create the required AWS Batch resources in your account.
-A [S3 bucket](#s3-bucket-creation) or EFS/FSx file system is required to store Nextflow intermediate files when using Seqera with AWS Batch.
+An [S3 bucket](/platform-cloud/integrations/cloud-providers/aws/data-access#s3-bucket-creation) or EFS/FSx file system is required to store Nextflow intermediate files when using Seqera with AWS Batch.
-Refer to the [IAM user creation](#iam-user-creation) section to ensure that your IAM user has the necessary permissions to run pipelines in Seqera Platform. Remove any permissions that are not required for your use case.
+Refer to the [IAM user creation](/platform-cloud/integrations/cloud-providers/aws/credentials#iam-user-creation) section to ensure that your IAM user has the necessary permissions to run pipelines in Seqera Platform. Remove any permissions that are not required for your use case.
### Seqera manual compute environment
@@ -981,12 +301,12 @@ AWS Batch creates resources that you may be charged for in your AWS account. See
1. Select **AWS Batch** as the target platform.
1. From the **Credentials** drop-down, select existing AWS credentials, or select **+** to add new credentials. If you're using existing credentials, skip to step 9.
:::note
- You can create multiple credentials in your Seqera environment. See [Credentials](../credentials/overview).
+ You can create multiple credentials in your Seqera environment. See [Credentials](/platform-cloud/integrations/overview).
:::
1. Enter a name, e.g., _AWS Credentials_.
1. Under **AWS credential mode**, select **Keys** or **Role**.
1. For **Keys** mode:
- - Add the **Access key** and **Secret key** you [previously obtained](#obtain-iam-user-credentials).
+ - Add the **Access key** and **Secret key** [obtained from the AWS IAM console](/platform-cloud/integrations/cloud-providers/aws/credentials#obtain-iam-user-credentials).
- Optionally paste the IAM role ARN which Seqera must use for accessing your AWS resources in **Assume role**.
- If you paste a role ARN in **Assume role**, the **Generate External ID** switch is displayed. Generating an External ID is optional in **Keys** mode.
- If **Generate External ID** is selected, an External ID is automatically generated and shown after you save the credential.
@@ -997,7 +317,7 @@ AWS Batch creates resources that you may be charged for in your AWS account. See
When using AWS keys without an assumed role, the associated AWS user must have been granted permissions to operate on the cloud resources directly. When an assumed role is provided, the IAM user keys are only used to retrieve temporary credentials impersonating the role specified: this could be useful when e.g. multiple IAM users are used to access the same AWS account, and the actual permissions to operate on the resources are only granted to the role.
:::
1. Select a **Region**, e.g., _eu-west-1 - Europe (Ireland)_. This region must match the region where your S3 bucket or EFS/FSx work directory is located to avoid high data transfer costs.
-1. Enter or select from the dropdown menu the S3 bucket [previously created](#s3-bucket-creation) in the **Pipeline work directory** field, e.g., `s3://seqera-bucket`. This bucket must be in the same region chosen in the previous step to avoid incurring high data transfer costs. The work directory can be customized to specify a folder inside the bucket, e.g., `s3://seqera-bucket/nextflow-workdir`.
+1. Enter or select from the dropdown menu the [S3 bucket you created](/platform-cloud/integrations/cloud-providers/aws/data-access#s3-bucket-creation) in the **Pipeline work directory** field, e.g., `s3://seqera-bucket`. This bucket must be in the same region chosen in the previous step to avoid incurring high data transfer costs. The work directory can be customized to specify a folder inside the bucket, e.g., `s3://seqera-bucket/nextflow-workdir`.
:::note
When you specify an S3 bucket as your work directory, this bucket is used for the Nextflow [cloud cache](https://docs.seqera.io/nextflow/cache-and-resume#cache-stores) by default. Seqera adds a `cloudcache` block to the Nextflow configuration file for all runs executed with this compute environment. This block includes the path to a `cloudcache` folder in your work directory, e.g., `s3://seqera-bucket/cloudcache/.cache`. You can specify an alternative cache location with the **Nextflow config file** field on the pipeline [launch](../launch/launchpad#launch-form) form.
:::
@@ -1049,7 +369,7 @@ AWS Batch creates resources that you may be charged for in your AWS account. See
1. Select **Enable Fusion Snapshots (beta)** to enable Fusion to automatically restore jobs that are interrupted when an AWS Spot instance reclamation occurs. Requires Fusion v2. See [Fusion Snapshots](https://docs.seqera.io/fusion/guide/snapshots) for more information.
1. Set the **Config mode** to **Manual**.
-1. Enter the **Head queue** created following the [instructions](../enterprise/advanced-topics/manual-aws-batch-setup.mdx), which is the name of the AWS Batch queue that the Nextflow main job will run.
+1. Enter the **Head queue** created following the [AWS Batch manual setup instructions](/platform-cloud/integrations/cloud-providers/aws/manual-setup), which is the name of the AWS Batch queue that the Nextflow main job will run.
1. Enter the **Compute queue**, which is the name of the AWS Batch queue where tasks will be submitted.
1. Apply [**Resource labels**](../resource-labels/overview) to the cloud resources produced by this compute environment. Workspace default resource labels are prefilled.
1. Expand **Staging options** to include:
diff --git a/platform-cloud/docs/compute-envs/aws-cloud.md b/platform-cloud/docs/compute-envs/aws-cloud.md
index ecf633381..0a262ebe6 100644
--- a/platform-cloud/docs/compute-envs/aws-cloud.md
+++ b/platform-cloud/docs/compute-envs/aws-cloud.md
@@ -58,394 +58,17 @@ The following regions are currently supported:
- `us-west-1`
- `us-west-2`
-## Required Platform IAM permissions
+## Before you start
-To create and launch pipelines, explore buckets with Data Explorer or run Studio sessions with the AWS Cloud compute environment, an IAM user with specific permissions must be provided. Some permissions are mandatory for the compute environment to be created and function correctly, while others are optional and used for example to provide list of values to pick from in the Platform UI.
+Set up the [AWS integration](/platform-cloud/integrations/cloud-providers/aws/overview) before creating an AWS Cloud compute environment in Seqera:
-Permissions can be attached directly to an [IAM user](#iam-user-creation), or to an [IAM role](#iam-role-creation-optional) that the IAM user can assume when accessing AWS resources.
+- [AWS IAM policies](/platform-cloud/integrations/cloud-providers/aws/iam-policies) — required permissions (select the **AWS Cloud** tab).
+- [AWS credentials](/platform-cloud/integrations/cloud-providers/aws/credentials) — IAM policy, IAM user, IAM role, and how to add credentials in Seqera.
-A permissive and broad policy with all the required permissions is provided here for a quick start. However, follow the principle of least privilege and only grant the necessary permissions for your use case, as shown in the following sections.
-
-
-Full permissive policy (For reference)
-
-```json
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Sid": "AwsCloudCreate",
- "Effect": "Allow",
- "Action": [
- "iam:CreateRole",
- "iam:AddRoleToInstanceProfile",
- "iam:CreateInstanceProfile",
- "iam:AttachRolePolicy",
- "iam:PutRolePolicy",
- "iam:TagRole",
- "iam:TagInstanceProfile"
- ],
- "Resource": [
- "arn:aws:iam::*:role/TowerForge-*",
- "arn:aws:iam::*:instance-profile/TowerForge-*"
- ]
- },
- {
- "Sid": "AwsCloudCreatePassRole",
- "Effect": "Allow",
- "Action": [
- "iam:PassRole"
- ],
- "Resource": "arn:aws:iam::*:role/TowerForge-*"
- },
- {
- "Sid": "AwsCloudLaunchEC2",
- "Effect": "Allow",
- "Action": [
- "ec2:CreateTags",
- "ec2:DeleteTags",
- "ec2:DescribeInstances",
- "ec2:RunInstances",
- "ec2:TerminateInstances"
- ],
- "Resource": "*"
- },
- {
- "Sid": "AwsCloudLaunchLogs",
- "Effect": "Allow",
- "Action": [
- "logs:GetLogEvents"
- ],
- "Resource": "arn:aws:logs:*:*:log-group:*:log-stream:*"
- },
- {
- "Sid": "AwsCloudLaunchS3",
- "Effect": "Allow",
- "Action": [
- "s3:GetObject"
- ],
- "Resource": "*"
- },
- {
- "Sid": "AwsCloudDelete",
- "Effect": "Allow",
- "Action": [
- "iam:GetRole",
- "iam:ListAttachedRolePolicies",
- "iam:ListRolePolicies",
- "iam:DeleteRole",
- "iam:DeleteInstanceProfile",
- "iam:RemoveRoleFromInstanceProfile",
- "iam:DetachRolePolicy",
- "iam:DeleteRolePolicy"
- ],
- "Resource": [
- "arn:aws:iam::*:role/TowerForge-*",
- "arn:aws:iam::*:instance-profile/TowerForge-*"
- ]
- },
- {
- "Sid": "AwsCloudRead",
- "Effect": "Allow",
- "Action": [
- "ec2:DescribeInstanceTypes",
- "ec2:DescribeKeyPairs",
- "ec2:DescribeVpcs",
- "ec2:DescribeImages",
- "ec2:DescribeSubnets",
- "ec2:DescribeSecurityGroups",
- "s3:ListAllMyBuckets"
- ],
- "Resource": "*"
- }
- ]
-}
-```
-
-
-
-### Compute environment creation
-
-The following permissions are required to provision resources in the AWS account. Only IAM roles that will be assumed by the EC2 instance must be provisioned:
-
-```json
-{
- "Sid": "AwsCloudCreate",
- "Effect": "Allow",
- "Action": [
- "iam:CreateRole",
- "iam:AddRoleToInstanceProfile",
- "iam:CreateInstanceProfile",
- "iam:AttachRolePolicy",
- "iam:PutRolePolicy",
- "iam:TagRole",
- "iam:TagInstanceProfile"
- ],
- "Resource": [
- "arn:aws:iam::*:role/TowerForge-*",
- "arn:aws:iam::*:instance-profile/TowerForge-*"
- ]
-},
-{
- "Sid": "AwsCloudCreatePassRole",
- "Effect": "Allow",
- "Action": [
- "iam:PassRole"
- ],
- "Resource": "arn:aws:iam::*:role/TowerForge-*"
-}
-```
-
-### Compute environment validation
-
-The following permissions are required to validate the compute environment at creation time. Seqera validates the input provided and that the resource ARNs exist in the target AWS account:
-
-```json
-{
- "Sid": "AwsCloudValidate",
- "Effect": "Allow",
- "Action": [
- "ec2:DescribeInstanceTypes",
- "ec2:DescribeImages",
- "ec2:DescribeSubnets",
- "ec2:DescribeSecurityGroups"
- ],
- "Resource": "*"
-}
-```
-
-### Pipeline and Studio session management
-
-The following permissions are required to launch pipelines, run Studio sessions, fetch live execution logs from CloudWatch, download logs from S3, and stop the execution:
-
-```json
-{
- "Sid": "AwsCloudLaunchEC2",
- "Effect": "Allow",
- "Action": [
- "ec2:CreateTags",
- "ec2:DeleteTags",
- "ec2:DescribeInstances",
- "ec2:RunInstances",
- "ec2:TerminateInstances"
- ],
- "Resource": "*"
-},
-{
- "Sid": "AwsCloudLaunchLogs",
- "Effect": "Allow",
- "Action": [
- "logs:GetLogEvents"
- ],
- "Resource": "arn:aws:logs:*:*:log-group:*:log-stream:*"
-},
-{
- "Sid": "AwsCloudLaunchS3",
- "Effect": "Allow",
- "Action": [
- "s3:GetObject"
- ],
- "Resource": "arn:aws:s3:::/WORKDIR/*"
-}
-```
-
-### Compute environment termination and resource disposal
-
-The following permissions are required to remove resources created by Seqera when the compute environment is deleted:
-
-```json
-{
- "Sid": "AwsCloudDelete",
- "Effect": "Allow",
- "Action": [
- "iam:GetRole",
- "iam:ListAttachedRolePolicies",
- "iam:ListRolePolicies",
- "iam:DeleteRole",
- "iam:DeleteInstanceProfile",
- "iam:RemoveRoleFromInstanceProfile",
- "iam:DetachRolePolicy",
- "iam:DeleteRolePolicy"
- ],
- "Resource": [
- "arn:aws:iam::*:role/TowerForge-*",
- "arn:aws:iam::*:instance-profile/TowerForge-*"
- ]
-}
-```
-
-### Optional permissions
-
-The following permissions enable Seqera to populate values for dropdown fields. If missing, the input fields will not be auto-populated but can still be manually entered. Though optional, these permissions are recommended for a smoother and less error-prone user experience:
-
-```json
-{
- "Sid": "AwsCloudRead",
- "Effect": "Allow",
- "Action": [
- "ec2:DescribeInstanceTypes",
- "ec2:DescribeKeyPairs",
- "ec2:DescribeVpcs",
- "ec2:DescribeImages",
- "ec2:DescribeSubnets",
- "ec2:DescribeSecurityGroups",
- "s3:ListAllMyBuckets"
- ],
- "Resource": "*"
-}
-```
-
-## Create the IAM policy
-
-The policy above must be created in the AWS account where the AWS Batch resources need to be created.
-
-1. Open the [AWS IAM console](https://console.aws.amazon.com/iam) in the account where you want to create the AWS Batch resources.
-1. From the left navigation menu, select **Policies** under **Access management**.
-1. Select **Create policy**.
-1. On the **Policy editor** section, select the **JSON** tab.
-1. Following the instructions detailed in the [IAM permissions breakdown section](#required-platform-iam-permissions) replace the default text in the policy editor area under the **JSON** tab with a policy adapted to your use case, then select **Next**.
-1. Enter a name and description for the policy on the **Review and create** page, then select **Create policy**.
-
-## IAM user creation
-
-Seqera requires an Identity and Access Management (IAM) User to create and manage AWS Batch resources in your AWS account. We recommend creating a separate IAM policy rather than an IAM User inline policy, as the latter only allows 2048 characters, which may not be sufficient for all the required permissions.
-
-In certain scenarios, for example when multiple users need to access the same AWS account and provision AWS Batch resources, an IAM role with the required permissions can be created instead, and the IAM user can assume that role when accessing AWS resources, as detailed in the [IAM role creation (optional)](#iam-role-creation-optional) section.
-
-Depending whether you choose to let Seqera automatically create the required AWS Batch resources in your account, or prefer to set them up manually, the IAM user must have specific permissions as detailed in the [Required Platform IAM permissions](#required-platform-iam-permissions) section. Alternatively, you can create an IAM role with the required permissions and allow the IAM user to assume that role when accessing AWS resources, as detailed in the [IAM role creation (optional)](#iam-role-creation-optional) section.
-
-### Create an IAM user
-
-1. From the [AWS IAM console](https://console.aws.amazon.com/iam), select **Users** in the left navigation menu, then select **Create User** at the top right of the page.
-1. Enter a name for your user (e.g., _seqera_) and select **Next**.
-1. Under **Permission options**, select **Attach policies directly**, then search for and select the policy created above, and select **Next**.
- * If you prefer to make the IAM user assume a role to manage AWS resources (see the [IAM role creation (optional)](#iam-role-creation-optional) section), create a policy with the following content (edit the AWS principal with the ARN of the role created) and attach it to the IAM user:
-
- ```json
- {
- "Sid": "AssumeRoleToManageBatchResources",
- "Effect": "Allow",
- "Action": "sts:AssumeRole",
- "Resource": "arn:aws:iam:::role/",
- "Condition": {
- "StringEquals": {
- "sts:ExternalId": ""
- }
- }
- }
- ```
-1. On the last page, review the user details and select **Create user**.
-
-The user has now been created. The most up-to-date instructions for creating an IAM user can be found in the [AWS documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html).
-
-### Obtain IAM user credentials
-
-To get the credentials needed to connect Seqera to your AWS account, follow these steps:
-
-1. From the [AWS IAM console](https://console.aws.amazon.com/iam), select **Users** in the left navigation menu, then select the newly created user from the users table.
-1. Select the **Security credentials** tab, then select **Create access key** under the **Access keys** section.
-1. In the **Use case** dialog that appears, select **Command line interface (CLI)**, then tick the confirmation checkbox at the bottom to acknowledge that you want to proceed creating an access key, and select **Next**.
-1. Optionally provide a description for the access key, like the reason for creating it, then select **Create access key**.
-1. Save the **Access key** and **Secret access key** in a secure location as you will need to provide them when creating credentials in Seqera.
-
-## IAM role creation (optional)
-
-Rather than attaching permissions directly to the IAM user, you can create an IAM role with the required permissions and allow the IAM user to assume that role when accessing AWS resources. This is useful when multiple IAM users are used to access the same AWS account: this way the actual permissions to operate on the resources are only granted to a single centralized role.
-
-1. From the [AWS IAM console](https://console.aws.amazon.com/iam), select **Roles** in the left navigation menu, then select **Create role** at the top right of the page.
-1. Select **Custom trust policy** as the type of trusted entity, provide the following policy and edit the AWS principal with the ARN of the IAM user created in the [IAM user creation](#iam-user-creation) section, then select **Next**.
- ```json
- {
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Principal": {
- "AWS": [
- "arn:aws:iam:::user/"
- ]
- },
- "Action": "sts:AssumeRole",
- "Condition": {
- "StringEquals": {
- "sts:ExternalId": ""
- }
- }
- }
- ]
- }
- ```
-1. On the **Permissions** page, search for and select the policy created in the [IAM user creation](#iam-user-creation) section, then select **Next**.
-1. Give the role a name and optionally a description, review the details of the role, optionally provide tags to help you identify the role, then select **Create role**.
-
-Multiple users can be specified in the trust policy by adding more ARNs to the `Principal` section.
-
-:::note
-Seqera Platform generates the `External ID` value during AWS credential creation. For role-based credentials, use this exact value in your IAM trust policy (`sts:ExternalId`).
-:::
-
-### Role-based trust policy example (Seqera Cloud)
-
-For role-based AWS credentials in Seqera Cloud, allow the Seqera Cloud access role `arn:aws:iam::161471496260:role/SeqeraPlatformCloudAccessRole` in your trust policy and enforce the `External ID` generated during credential creation:
-
-```json
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Principal": {
- "AWS": "arn:aws:iam::161471496260:role/SeqeraPlatformCloudAccessRole"
- },
- "Action": "sts:AssumeRole",
- "Condition": {
- "StringEquals": {
- "sts:ExternalId": ""
- }
- }
- },
- {
- "Effect": "Allow",
- "Principal": {
- "AWS": "arn:aws:iam::161471496260:role/SeqeraPlatformCloudAccessRole"
- },
- "Action": "sts:TagSession"
- }
- ]
-}
-```
-
-## AWS credential options
-
-AWS credentials can be configured in two ways:
-
-- **Key-based credentials**: Access key and secret key with direct IAM permissions. If you provide a role ARN in **Assume role**, the **Generate External ID** switch is displayed and External ID generation is optional.
-- **Role-based credentials (recommended)**: Use role assumption only (no static keys). Paste the IAM role ARN which Seqera must use for accessing your AWS resources in **Assume role**. External ID is generated automatically when you save.
-
-Use the IAM role ARN which Seqera must use for accessing your AWS resources in **Assume role**. This field is available for both key-based and role-based credentials. It is optional for key-based credentials and required for role-based credentials.
-
-Existing credentials created before March 2026 continue to work without changes.
-
-## Managed Amazon Machine Image (AMI)
-
-The AWS Cloud compute environment uses a public AMI maintained by Seqera, and the pipeline launch procedure assumes that some basic tooling is already present in the image itself. If you want to provide your own AMI, it must include at least the following:
-
-- Docker engine, configured to run at startup.
-- CloudWatch agent.
-- The ability to shut down with the `shutdown` command. If this is missing, EC2 instances will keep running and accumulate additional costs.
-
-### Release cadence and software updates
-
-The AMI is based on the [Amazon Linux 2023 image](https://docs.aws.amazon.com/linux/al2023/ug/what-is-amazon-linux.html). System package versions are pinned for each specific Amazon Linux 2023 version. Seqera subscribes to the [AWS SNS topic](https://docs.aws.amazon.com/linux/al2023/ug/receive-update-notification.html) to receive Amazon Linux 2023 update notifications. When updates are available, this triggers a new Seqera AMI release built on the latest image, which includes system package updates and security patches.
-
-## Advanced options
-
-- **Instance Type**: The EC2 instance type used by the compute environment. Choosing the instance type will directly allocate the CPU and memory available for computation. See [EC2 instance types](https://aws.amazon.com/ec2/instance-types/) for a comprehensive list of instance types and their resource limitations.
-- **Graviton architecture**: Enable the use of Graviton instances. AWS Graviton processors, based on the ARM64 architecture, tend to offer a better performance-to-price ratio, however, the tooling used by your pipelines must be compatible with ARM architecture.
-- **AMI ID**: The ID of the AMI that will be used to launch the EC2 instance. Use Seqera-maintained AMIs for best performance.
-- **Key pair**: The [EC2 key pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) to enable SSH connectivity to the running instance. If unspecified, no SSH key will be present in the running EC2 instance.
-- **VPC ID**: The ID of the VPC where the EC2 instance will be launched. If unspecified, the default VPC will be used.
-- **Subnets**: The list of VPC subnets where the EC2 instance will run. If unspecified, all the subnets of the VPC will be used.
-- **Security groups**: The security groups the EC2 instance will be a part of. If unspecified, no security groups will be used.
-- **Instance Profile**: The ARN of the `InstanceProfile` used by the EC2 instance to assume a role while running. If unspecified, Seqera will provision one with enough permissions to run.
-- **Boot disk size**: The size of the EBS boot disk for the EC2 instance. If undefined, a default 50 GB `gp3` volume will be used.
+{/* Anchor stubs preserved for backwards compatibility with deep links from older content. */}
+
+
+
+
+
+
diff --git a/platform-cloud/docs/compute-envs/azure-batch.md b/platform-cloud/docs/compute-envs/azure-batch.md
index 8de2cf916..5f887354e 100644
--- a/platform-cloud/docs/compute-envs/azure-batch.md
+++ b/platform-cloud/docs/compute-envs/azure-batch.md
@@ -30,183 +30,26 @@ Azure uses accounts for each service. For example, an [Azure Storage account][az
An Azure service principal is an identity created specifically for applications, hosted services, or automated tools to access Azure resources. It acts like a user identity with a defined set of permissions, enabling resources authenticated through the service principal to perform actions within the Azure account. Seqera can utilize an Azure service principal to authenticate and access Azure Batch for job execution and Azure Storage for data management.
-## Create Azure resources
-
-### Resource group
-
-Create a resource group to link your Azure Batch and Azure Storage account:
-
-:::note
-A resource group can be created while creating an Azure Storage account or Azure Batch account.
-:::
-
-1. Log in to your Azure account, go to the [Create Resource group][az-create-rg] page, and select **Create new resource group**.
-1. Enter a name for the resource group, such as _seqeracompute_.
-1. Choose the preferred region.
-1. Select **Review and Create** to proceed.
-1. Select **Create**.
-
-### Storage account
-
-After creating a resource group, set up an [Azure Storage account][az-learn-storage]:
-
-1. Log in to your Azure account, go to the [Create storage account][az-create-storage] page, and select **Create a storage account**.
- :::note
- If you haven't created a resource group, you can do so now.
- :::
-1. Enter a name for the storage account, such as _seqeracomputestorage_.
-1. Choose the preferred region. This must be the same region as the Batch account.
-1. Platform supports all performance or redundancy settings — select the most appropriate settings for your use case.
-1. Select **Next: Advanced**.
-1. Enable _storage account key access_.
-1. Select **Next: Networking**.
- - Enable public access from all networks. You can enable public access from selected virtual networks and IP addresses, but you will be unable to use Forge to create compute resources. Disabling public access is not supported.
-1. Select **Data protection**.
- - Configure appropriate settings. All settings are supported by the platform.
-1. Select **Encryption**.
- - Only Microsoft-managed keys (MMK) are supported.
-1. In **tags**, add any required tags for the storage account.
-1. Select **Review and Create**.
-1. Select **Create** to create the Azure Storage account.
- - You will need at least one Blob Storage container to act as a working directory for Nextflow.
-1. Go to your new storage account and select **+ Container** to create a new Blob Storage container. A new container dialogue will open. Enter a suitable name, such as _seqeracomputestorage-container_.
-1. Go to the **Access Keys** section of your new storage account (_seqeracomputestorage_ in this example).
-1. Store the access keys for your Azure Storage account, to be used when you create a Seqera compute environment.
-
-:::caution
-Blob container storage credentials are associated with the Batch pool configuration. Avoid changing these credentials in your Seqera instance after you have created the compute environment.
-:::
-
-### Batch account
-
-After you have created a resource group and Storage account, create a [Batch account][az-learn-batch]:
-
-1. Log in to your Azure account and select **Create a batch account** on [this page][az-create-batch].
-1. Select the existing resource group or create a new one.
-1. Enter a name for the Batch account, such as _seqeracomputebatch_.
-1. Choose the preferred region. This must be the same region as the Storage account.
-1. Select **Advanced**.
-1. For **Pool allocation mode**, select **Batch service**.
-1. For **Authentication mode**, select _Shared Key_.
-1. Select **Networking**. Ensure networking access is sufficient for Platform and any additional required resources.
-1. Add any **Tags** to the Batch account, if needed.
-1. Select **Review and Create**.
-1. Select **Create**.
-1. Go to your new Batch account, then select **Access Keys**.
-1. Store the access keys for your Azure Batch account, to be used when you create a Seqera compute environment.
- :::caution
- A newly-created Azure Batch account may not be entitled to create virtual machines without making a service request to Azure.
- See [Azure Batch service quotas and limits][az-batch-quotas] for more information.
- :::
-1. Select the **+ Quotas** tab of the Azure Batch account to check and increase existing quotas if necessary.
-1. Select **+ Request quota increase** and add the quantity of resources you require. Here is a brief guideline:
- - **Active jobs and schedules**: Each Nextflow process will require an active Azure Batch job per pipeline while running, so increase this number to a high level. See [here][az-learn-jobs] to learn more about jobs in Azure Batch.
- - **Pools**: Each platform compute environment requires at least one Azure Batch pool. Batch Forge creates two pools by default (one for the head job and one for compute tasks). Each pool is composed of multiple machines of one virtual machine size.
- :::note
- To use separate pools for head and compute nodes, see [this FAQ entry](../troubleshooting_and_faqs/azure_troubleshooting).
- :::
- - **Batch accounts per region per subscription**: Set this to the number of Azure Batch accounts per region per subscription. Only one is required.
- - **Total Dedicated vCPUs per VM series**: See the Azure documentation for [virtual machine sizes][az-vm-sizes] to help determine the machine size you need. We recommend the latest version of the ED series available in your region as a cost-effective and appropriately-sized machine for running Nextflow. However, you will need to select alternative machine series that have additional requirements, such as those with additional GPUs or faster storage. Increase the quota by the number of required concurrent CPUs. In Azure, machines are charged per cpu minute so there is no additional cost for a higher number.
-
-### Credentials
-
-There are two types of Azure credentials available: access keys and Entra service principals.
-
-Access keys are simple to use but have several limitations:
-
-- Access keys are long-lived.
-- Access keys provide full access to the Azure Storage and Azure Batch accounts.
-- Azure allows only two access keys per account, making them a single point of failure.
-- Access keys do not support VNet/subnet configuration.
-
-Entra service principals are accounts which can be granted access to Azure Batch and Azure Storage resources:
-
-- Service principals enable role-based access control with more precise permissions.
-- Service principals map to a many-to-many relationship with Azure Batch and Azure Storage accounts.
-- Some Azure Batch features, such as VNet/subnet configuration, are only available when using Microsoft Entra.
-
-Both credential types support Batch Forge and Manual compute environment modes.
-
-:::note
-The two Azure credential types use different authentication methods. You can add more than one credential to a workspace, but Platform compute environments use only one credential at any given time. While separate credentials can be used by separate compute environments concurrently, they are not cross-compatible — access granted by one credential will not be shared with the other.
-:::
-
-#### Access keys
-
-To create an access key:
-
-1. Navigate to the Azure Portal and sign in.
-1. Locate the Azure Batch account and select **Keys** under **Account management**. The Primary and Secondary keys are listed here. Copy one of the keys and save it in a secure location for later use.
-1. Locate the Azure Storage account and, under the **Security and Networking** section, select **Access keys**. Key1 and Key2 options are listed here. Copy one of them and save it in a secure location for later use.
-1. In your Platform workspace **Credentials** tab, select the **Add credentials** button and complete the following fields:
- - Enter a **Name** for the credentials
- - **Provider**: Azure
- - Select the **Shared key** tab
- - Add the **Batch account** and **Blob Storage account** names and access keys to the relevant fields.
-1. Delete the copied keys from their temporary location after they have been added to a credential in Platform.
-
-#### Entra service principal and managed identity
-
-To use Entra for authentication, you must create a service principal and managed identity. Seqera uses the service principal to authenticate to Azure Batch and Azure Storage. It submits a Nextflow task as the head process to run Nextflow, which authenticates to Azure Batch and Storage using the managed identity attached to the node pool.
-
-Therefore, you must create both an Entra service principal and a managed identity:
-
-1. Add the service principal details as credentials in Seqera Platform.
-2. Assign the managed identity to each Azure Batch node pool with the relevant permissions.
-3. When using Batch Forge, provide the managed identity resource ID for each managed identity. Seqera Platform assigns the identity to each pool during creation.
-
-:::note
-Entra service principal credentials support both Batch Forge and Manual compute environments. Some features, such as VNet/subnet configuration, require Entra credentials. When using Entra credentials, a managed identity is recommended for best security practices, but is not mandatory.
-:::
-
-##### Service principal
-
-See [Create a service principal][az-create-sp] for more details.
-
-To create an Entra service principal:
-
-1. In the Azure Portal, navigate to **Microsoft Entra ID**. Under **App registrations**, select **New registration**.
-1. Provide a name for the application. The application will automatically have a service principal associated with it.
-1. Assign roles to the service principal:
- 1. Go to the Azure Storage account. Under **Access Control (IAM)**, select **Add role assignment**.
- 1. Select the **Storage Blob Data Reader** and **Storage Blob Data Contributor** roles.
- 1. Select **Members**, then **Select Members**. Search for your newly created service principal and assign the role.
- 1. Repeat the same process for the Azure Batch account, using the **Azure Batch Data Contributor** role.
-1. Platform will need credentials to authenticate as the service principal:
- 1. Navigate back to the app registration. On the **Overview** page, save the **Application (client) ID** value for use in Platform.
- 1. Select **Certificates & secrets**, then **New client secret**. A new secret is created containing a value and secret ID. Save both values securely for use in Platform.
-1. In your Platform workspace **Credentials** tab, select the **Add credentials** button and complete the following fields:
- - Enter a **Name** for the credentials
- - **Provider**: Azure
- - Select the **Entra** tab
- - Complete the remaining fields: **Batch account name**, **Blob Storage account name**, **Tenant ID** (Application (client) ID in Azure), **Client ID** (Client secret ID in Azure), **Client secret** (Client secret value in Azure).
-1. Delete the ID and secret values from their temporary location after they have been added to a credential in Platform.
-
-##### Managed identity
-
-:::info
-To use managed identities, Seqera requires Nextflow version 24.06.0-edge or later.
-:::
-
-Nextflow can authenticate to Azure services using a managed identity. This method offers enhanced security compared to access keys, but must run on Azure infrastructure.
-
-When you use a compute environment with a managed identity attached to the Azure Batch Pool, Nextflow can use this managed identity for authentication. However, Seqera still needs to use access keys or an Entra service principal to submit the initial task to Azure Batch to run Nextflow, which will then proceed with the managed identity for subsequent authentication.
-
-1. In Azure, create a user-assigned managed identity. See [Manage user-assigned managed identities](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/how-manage-user-assigned-managed-identities) for detailed steps. Take note of both the **client ID** and the **resource ID** of the managed identity when you create it.
-1. The user-assigned managed identity must have the necessary access roles for Nextflow. See [Required role assignments](https://docs.seqera.io/nextflow/azure#required-role-assignments) for more information.
-1. Associate the user-assigned managed identity with the Azure Batch Pool. See [Set up managed identity in your Batch pool](https://learn.microsoft.com/en-us/troubleshoot/azure/hpc/batch/use-managed-identities-azure-batch-account-pool#set-up-managed-identity-in-your-batch-pool) for more information.
-
- :::note
- When you use separate head and worker pools, you can assign separate managed identities to the head and compute pools. Each pool receives only the managed identity relevant to its role.
- :::
-
-1. When you set up the Seqera compute environment, select the Azure Batch pool by name and enter the managed identity **client ID** and (optionally) the **resource ID** in the specified fields. The resource ID is the full ARM path of the managed identity (e.g., `/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}`).
-
-When you submit a pipeline to this compute environment, Nextflow will authenticate using the managed identity associated with the Azure Batch node it runs on, rather than relying on access keys.
-
-:::caution
-If a managed identity is misconfigured (e.g., invalid client ID or missing RBAC roles), the pipeline will fail with an explicit error. Seqera will not silently fall back to access key authentication.
-:::
+## Before you start
+
+Set up the [Azure integration](/platform-cloud/integrations/cloud-providers/azure/overview) before creating an Azure Batch compute environment in Seqera:
+
+- [Storage access](/platform-cloud/integrations/cloud-providers/azure/storage-access) — resource group, Storage account, Blob container, and Batch account prerequisites.
+- [Roles & service principals](/platform-cloud/integrations/cloud-providers/azure/roles-and-service-principals) — required role assignments and service principal registration.
+- [Azure credentials](/platform-cloud/integrations/cloud-providers/azure/credentials) — access keys, Entra service principals, and managed identities.
+- [Azure Batch manual setup](/platform-cloud/integrations/cloud-providers/azure/manual-setup) — manual pool wiring and advanced scenarios.
+
+{/* Anchor stubs preserved for backwards compatibility with deep links from older content. */}
+
+
+
+
+
+
+
+
+
+
## Add Seqera compute environment
@@ -318,7 +161,7 @@ Create a Batch Forge Azure Batch compute environment:
Batch Forge creates separate Azure Batch pools for the Nextflow head job and compute tasks by default (named `tower-pool-{envId}-head` and `tower-pool-{envId}-worker`). This prevents the head node from competing for resources with compute tasks and allows independent sizing of each pool.
:::
-1. Select or create [**Container registry credentials**](../credentials/azure_registry_credentials) to authenticate a registry (used by the [Wave containers](https://docs.seqera.io/nextflow/wave) service). It is recommended to use an [Azure Container registry](https://azure.microsoft.com/en-gb/products/container-registry) within the same region for maximum performance.
+1. Select or create [**Container registry credentials**](/platform-cloud/integrations/container-registries/azure) to authenticate a registry (used by the [Wave containers](https://docs.seqera.io/nextflow/wave) service). It is recommended to use an [Azure Container registry](https://azure.microsoft.com/en-gb/products/container-registry) within the same region for maximum performance.
1. Apply [**Resource labels**](../resource-labels/overview). This will populate the **Metadata** fields of the Azure Batch pools and jobs.
1. Expand **Staging options** to include:
- Optional [pre- or post-run Bash scripts](../launch/advanced#pre-and-post-run-scripts) that execute before or after the Nextflow pipeline execution in your environment.
@@ -437,7 +280,7 @@ The following settings can be modified after creating a pool:
1. Select your existing Azure credentials (access keys or Entra service principal) or select **+** to add new credentials.
:::note
- Both access keys and Entra service principal credentials are supported. Some features, such as VNet/subnet configuration, require Entra credentials. To use Entra with a managed identity, see [Managed identity](#managed-identity) below.
+ Both access keys and Entra service principal credentials are supported. Some features, such as VNet/subnet configuration, require Entra credentials. To use Entra with a managed identity, see [Managed identity](/platform-cloud/integrations/cloud-providers/azure/credentials#managed-identity-azure-batch).
:::
1. Select a **Region**, such as _eastus (East US)_.
1. In the **Work directory** field, add the Azure blob container created previously. For example, `az://seqeracomputestorage-container/work`.
@@ -474,7 +317,7 @@ The following settings can be modified after creating a pool:
:::note
The default Azure Batch implementation uses a single pool for head and compute nodes. To use separate pools for head and compute nodes, see [this FAQ entry](../troubleshooting_and_faqs/azure_troubleshooting).
:::
-1. Enter a user-assigned **Managed identity client ID**, if one is attached to your Azure Batch pool. See [Managed Identity](#managed-identity) below.
+1. Enter a user-assigned **Managed identity client ID**, if one is attached to your Azure Batch pool. See [Managed identity](/platform-cloud/integrations/cloud-providers/azure/credentials#managed-identity-azure-batch).
1. Apply [**Resource labels**](../resource-labels/overview). This will populate the **Metadata** fields of the Azure Batch pool.
1. Expand **Staging options** to include:
- Optional [pre- or post-run Bash scripts](../launch/advanced#pre-and-post-run-scripts) that execute before or after the Nextflow pipeline execution in your environment.
diff --git a/platform-cloud/docs/compute-envs/azure-cloud.md b/platform-cloud/docs/compute-envs/azure-cloud.md
index aea357d0b..0f4bf9062 100644
--- a/platform-cloud/docs/compute-envs/azure-cloud.md
+++ b/platform-cloud/docs/compute-envs/azure-cloud.md
@@ -53,365 +53,22 @@ Nextflow_log_CL | where workflowId == ""
The table retains logs for 7 days. Nextflow uploads log files to Azure Storage for long-term storage.
-## Requirements
-
-### Platform credentials
-
-To create and launch pipelines or Studio sessions with Azure Cloud compute environments, you must attach Seqera credentials with an Entra client ID/client secret pair. These credentials must also include your Azure subscription ID and Storage account configuration.
-
-See [Register an application in Microsoft Entra ID](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-register-app) and [Add and manage application credentials in Microsoft Entra ID](https://learn.microsoft.com/en-us/entra/identity-platform/how-to-add-credentials?tabs=client-secret) for more information.
-
-### Required permissions
-
-For granular control over the permissions granted to Seqera, use [Azure custom roles](https://learn.microsoft.com/en-us/azure/role-based-access-control/custom-roles) and [assign](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal) them to the service principal. The full role JSON definition is:
-
-```json
-{
- "properties": {
- "roleName": "seqera-azure-cloud",
- "description": "Role assumed by Seqera Platform to create Azure Cloud compute environments",
- "assignableScopes": [
- "/subscriptions/"
- ],
- "permissions": [
- {
- "actions": [
- "Microsoft.Compute/virtualMachines/read",
- "Microsoft.Compute/virtualMachines/write",
- "Microsoft.Compute/virtualMachines/delete",
- "Microsoft.Compute/virtualMachines/deallocate/action",
- "Microsoft.Compute/virtualMachines/attachDetachDataDisks/action",
-
- "Microsoft.Resources/subscriptions/resourceGroups/write",
- "Microsoft.Resources/subscriptions/resourceGroups/read",
- "Microsoft.Resources/subscriptions/resourceGroups/delete",
-
- "Microsoft.Network/publicIPAddresses/read",
- "Microsoft.Network/publicIPAddresses/write",
- "Microsoft.Network/publicIPAddresses/delete",
- "Microsoft.Network/publicIPAddresses/join/action",
-
- "Microsoft.Network/virtualNetworks/read",
- "Microsoft.Network/virtualNetworks/write",
- "Microsoft.Network/virtualNetworks/delete",
- "Microsoft.Network/virtualNetworks/subnets/read",
- "Microsoft.Network/virtualNetworks/subnets/write",
- "Microsoft.Network/virtualNetworks/subnets/delete",
- "Microsoft.Network/virtualNetworks/subnets/join/action",
-
- "Microsoft.Network/networkInterfaces/delete",
- "Microsoft.Network/networkInterfaces/write",
- "Microsoft.Network/networkInterfaces/read",
- "Microsoft.Network/networkInterfaces/join/action",
-
- "Microsoft.ManagedIdentity/userAssignedIdentities/read",
- "Microsoft.ManagedIdentity/userAssignedIdentities/write",
- "Microsoft.ManagedIdentity/userAssignedIdentities/delete",
- "Microsoft.ManagedIdentity/userAssignedIdentities/assign/action",
-
- "Microsoft.Authorization/roleAssignments/read",
- "Microsoft.Authorization/roleAssignments/write",
- "Microsoft.Authorization/roleAssignments/delete",
-
- "Microsoft.Authorization/roleDefinitions/read",
- "Microsoft.Authorization/roleDefinitions/write",
- "Microsoft.Authorization/roleDefinitions/delete",
-
- "Microsoft.Insights/DataCollectionRules/Read",
- "Microsoft.Insights/DataCollectionRules/Write",
- "Microsoft.Insights/DataCollectionRules/Delete",
-
- "Microsoft.Insights/DataCollectionEndpoints/Write",
- "Microsoft.Insights/DataCollectionEndpoints/Delete",
-
- "Microsoft.OperationalInsights/workspaces/write",
- "Microsoft.OperationalInsights/workspaces/read",
- "Microsoft.OperationalInsights/workspaces/delete",
- "Microsoft.OperationalInsights/workspaces/sharedkeys/action",
- "Microsoft.OperationalInsights/workspaces/tables/read",
- "Microsoft.OperationalInsights/workspaces/tables/write",
- "Microsoft.OperationalInsights/workspaces/tables/delete",
- "Microsoft.OperationalInsights/workspaces/query/read",
- "Microsoft.OperationalInsights/workspaces/query/Tables.Custom/read",
-
- "Microsoft.Storage/storageAccounts/blobServices/containers/read",
- "Microsoft.Storage/storageAccounts/blobServices/generateUserDelegationKey/action"
- ],
- "notActions": [],
- "dataActions": [
- "Microsoft.Insights/Telemetry/Write",
- "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read",
- "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/tags/read",
- "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write",
- "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/tags/write",
- "Microsoft.OperationalInsights/workspaces/tables/data/read"
- ],
- "notDataActions": []
- }
- ]
- }
-}
-```
-
-See [Start from JSON](https://learn.microsoft.com/en-us/azure/role-based-access-control/custom-roles-portal#start-from-json) to create this custom role in the Azure Portal.
-
-This role definition can be applied as-is for convenience, or it can be broken down into smaller roles. The purpose for each permission is outlined in the following sections.
-
-#### Compute environment creation
-
-The following permissions are required to provision resources in the Azure account when first creating the compute environment:
-
-```json
-{
- "properties": {
- "roleName": "seqera-azure-cloud-create",
- "description": "Role assumed by Seqera Platform to create Azure Cloud compute environments",
- "assignableScopes": [
- "/subscriptions/"
- ],
- "permissions": [
- {
- "actions": [
- "Microsoft.Resources/subscriptions/resourceGroups/read",
- "Microsoft.Resources/subscriptions/resourceGroups/write",
-
- "Microsoft.Storage/storageAccounts/blobServices/containers/read",
- "Microsoft.Network/virtualNetworks/read",
- "Microsoft.Network/virtualNetworks/write",
-
- "Microsoft.Network/virtualNetworks/subnets/read",
- "Microsoft.Network/virtualNetworks/subnets/write",
-
- "Microsoft.ManagedIdentity/userAssignedIdentities/read",
- "Microsoft.ManagedIdentity/userAssignedIdentities/write",
-
- "Microsoft.Authorization/roleAssignments/read",
- "Microsoft.Authorization/roleAssignments/write",
-
- "Microsoft.Authorization/roleDefinitions/read",
- "Microsoft.Authorization/roleDefinitions/write",
-
- "Microsoft.Insights/DataCollectionRules/Read",
- "Microsoft.Insights/DataCollectionRules/Write",
-
- "Microsoft.Insights/DataCollectionEndpoints/Write",
-
- "Microsoft.OperationalInsights/workspaces/read",
- "Microsoft.OperationalInsights/workspaces/write",
- "Microsoft.OperationalInsights/workspaces/tables/write"
- ],
- "notActions": [],
- "dataActions": [],
- "notDataActions": []
- }
- ]
- }
-}
-```
-
-#### Pipeline and Studio launch
-
-The following permissions are required to launch pipelines and Studios:
-
-```json
-{
- "properties": {
- "roleName": "seqera-azure-cloud-launch",
- "description": "Role assumed by Seqera Platform to launch Studios and pipelines on Azure Cloud compute environments",
- "assignableScopes": [
- "/subscriptions/"
- ],
- "permissions": [
- {
- "actions": [
- "Microsoft.Compute/virtualMachines/read",
- "Microsoft.Compute/virtualMachines/write",
- "Microsoft.Compute/virtualMachines/delete",
- "Microsoft.Compute/virtualMachines/deallocate/action",
- "Microsoft.Compute/virtualMachines/attachDetachDataDisks/action",
-
- "Microsoft.Network/publicIPAddresses/read",
- "Microsoft.Network/publicIPAddresses/write",
- "Microsoft.Network/publicIPAddresses/delete",
- "Microsoft.Network/publicIPAddresses/join/action",
-
- "Microsoft.Network/networkInterfaces/read",
- "Microsoft.Network/networkInterfaces/write",
- "Microsoft.Network/networkInterfaces/join/action",
- "Microsoft.Network/virtualNetworks/subnets/join/action",
-
- "Microsoft.ManagedIdentity/userAssignedIdentities/assign/action",
-
- "Microsoft.Insights/DataCollectionRules/Write",
- "Microsoft.Insights/DataCollectionEndpoints/Write"
- ],
- "notActions": [],
- "dataActions": [
- "Microsoft.Insights/Telemetry/Write"
- ],
- "notDataActions": []
- }
- ]
- }
-}
-```
-
-#### Live stream log fetching
-
-The following permissions are required to fetch logs for the pipeline execution while the task is running:
-
-``` json
-{
- "properties": {
- "roleName": "seqera-azure-cloud-logs",
- "description": "Role to be assumed by Seqera Platform to read live-streamed logs for Azure Cloud compute environments",
- "assignableScopes": [
- "/subscriptions/"
- ],
- "permissions": [
- {
- "actions": [
- "Microsoft.OperationalInsights/workspaces/query/read",
- "Microsoft.OperationalInsights/workspaces/query/Tables.Custom/read"
- ],
- "notActions": [],
- "dataActions": [
- "Microsoft.OperationalInsights/workspaces/tables/data/read"
- ],
- "notDataActions": []
- }
- ]
- }
-}
-```
-
-#### Data-links
-
-The following permissions are required to work with [Data Explorer](../data/data-explorer) data-links on Azure:
-
-```json
-{
- "properties": {
- "roleName": "seqera-azure-cloud-data-links",
- "description": "Role assumed by Seqera Platform to access data-links in Azure Cloud compute environments",
- "assignableScopes": [
- "/subscriptions/"
- ],
- "permissions": [
- {
- "actions": [
- "Microsoft.Storage/storageAccounts/blobServices/containers/read",
- "Microsoft.Storage/storageAccounts/blobServices/generateUserDelegationKey/action"
- ],
- "notActions": [],
- "dataActions": [
- "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read",
- "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/tags/read",
- "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write",
- "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/tags/write"
- ],
- "notDataActions": []
- }
- ]
- }
-}
-```
-
-#### Compute environment termination and resource disposal
-
-The following permissions are required to delete the resources created for the compute environment:
-
-```json
-{
- "properties": {
- "roleName": "seqera-azure-cloud-dispose",
- "description": "Role assumed by Seqera Platform to delete Azure Cloud compute environment resources",
- "assignableScopes": [
- "/subscriptions/"
- ],
- "permissions": [
- {
- "actions": [
- "Microsoft.Resources/subscriptions/resourceGroups/delete",
- "Microsoft.Network/publicIPAddresses/delete",
- "Microsoft.Network/virtualNetworks/delete",
- "Microsoft.Network/virtualNetworks/subnets/delete",
- "Microsoft.Network/networkInterfaces/delete",
- "Microsoft.ManagedIdentity/userAssignedIdentities/delete",
- "Microsoft.Authorization/roleAssignments/delete",
- "Microsoft.Authorization/roleDefinitions/delete",
- "Microsoft.Insights/DataCollectionRules/Delete",
- "Microsoft.Insights/DataCollectionEndpoints/Delete",
- "Microsoft.OperationalInsights/workspaces/delete",
- "Microsoft.OperationalInsights/workspaces/tables/delete"
- ],
- "notActions": [],
- "dataActions": [],
- "notDataActions": []
- }
- ]
- }
-}
-```
-
-## Add Azure Cloud credentials
-
-### Create a custom role in Microsoft Entra
-
-First, you must create a custom role with the permissions required for Seqera to manage Azure resources.
-
-1. Save the relevant permissions from the preceding sections to a local JSON file. Replace `` in the `assignableScopes` field of each permission with your Azure subscription ID.
-1. In the Azure Portal, go to **Subscriptions** and select your subscription.
-1. To create a custom role, select **Access control (IAM)**, then **Add** in the **Create a custom role** section.
-1. Provide the following details:
- - **Custom role name**: e.g., `seqera-azure-cloud`
- - **Description**: e.g., `Role for Seqera Platform to manage Azure Cloud compute environments`
- - **Baseline permissions**: Select **Start from JSON**
- - **File**: Select the local JSON file you saved earlier.
-1. Select **Next** and review the permissions to ensure all have been included correctly.
-1. Select **Next** and confirm that the assignable scope is your subscription ID.
-1. Select **Next**. If you found errors in the previous step, you can edit the JSON file here.
-1. Select **Next** and then **Create** to save the role.
-
-### Register an application in Microsoft Entra ID
-
-Create an application for Seqera to use for authentication:
-
-1. In the Azure Portal, go to **App registrations** and select **New registration**.
-1. Give the app a descriptive name, such as `SeqeraPlatformApp`.
-1. Select `Single tenant` for the supported account types.
-1. Create a client secret for the application. Seqera will use this value to authenticate to Azure, so keep it secret and store it securely.
-1. Under **Certificates & secrets**, select **New client secret** and give it a description such as `SeqeraPlatformSecret`. Set the expiration to a duration that matches your security policy. Select **Add**.
-
-After registration, you'll be taken to the application overview page. Copy and save the following values:
- - **Application (client) ID**: This is your Client ID
- - **Directory (tenant) ID**: This is your Tenant ID
-
-### Assign the custom role to the service principal
-
-Grant the service principal the necessary permissions by assigning the custom role.
-
-1. In the Azure Portal, navigate to **Subscriptions** and select your subscription. Then select **Access control (IAM)** and **Add role assignment** in the **Grant access to this resource** section.
-1. Select the **Privileged administrator roles** tab and select the role you created earlier, then select **Next**.
-1. Choose **Select members** and search for the application name (`SeqeraPlatformApp`). Then choose **Select**, then **Next**.
-1. Select **Review + assign**, **Review**, and then **Assign**.
-1. Under **What user can do**, select **Allow user to assign all roles except privileged administrator roles Owner, UAA, RBAC (Recommended)**, then select **Next**.
-1. Check the final details and select **Review + assign**.
-
-### Configure Seqera Platform credentials
-
-Add the service principal credentials to Seqera:
-
-1. Sign in to your Seqera workspace and navigate to the **Credentials** tab.
-1. Select **Add credentials**, select **Azure** as the provider, and select the **Cloud** tab for Microsoft Entra ID authentication.
-1. Enter the details of the credentials you saved earlier:
- - **Name**: Provide a descriptive name, such as `AzureCloudCredentials`
- - **Subscription ID**: Your Azure subscription ID
- - **Tenant ID**: Your Directory (tenant) ID from the [Register an application in Microsoft Entra ID](#register-an-application-in-microsoft-entra-id) instructions
- - **Client ID**: Your Application (client) ID from the [Register an application in Microsoft Entra ID](#register-an-application-in-microsoft-entra-id) instructions
- - **Client secret**: Your client secret value from the [Register an application in Microsoft Entra ID](#register-an-application-in-microsoft-entra-id) instructions
- - **Blob Storage account name**: Your Azure Storage account name
-1. Review the details, then select **Add** to save the credentials.
+## Before you start
+
+Set up the [Azure integration](/platform-cloud/integrations/cloud-providers/azure/overview) before creating an Azure Cloud compute environment in Seqera:
+
+- [Roles & service principals](/platform-cloud/integrations/cloud-providers/azure/roles-and-service-principals) — custom role JSON, app registration, and role assignment.
+- [Azure credentials](/platform-cloud/integrations/cloud-providers/azure/credentials) — Entra service principal credential setup.
+
+{/* Anchor stubs preserved for backwards compatibility with deep links from older content. */}
+
+
+
+
+
+
+
+
### Create a compute environment
diff --git a/platform-cloud/docs/compute-envs/eks.md b/platform-cloud/docs/compute-envs/eks.md
index bc518a7a0..9b8c42b5f 100644
--- a/platform-cloud/docs/compute-envs/eks.md
+++ b/platform-cloud/docs/compute-envs/eks.md
@@ -9,386 +9,36 @@ tags: [eks, amazon, compute environment, ce]
## Requirements
-- Seqera Platform needs an IAM User to obtain details about the EKS cluster, and to fetch log files from the S3 bucket, if one is used as work directory. This user must have the permissions detailed in the [Required Platform IAM permissions](#required-platform-iam-permissions) section. Optionally, permissions can instead be granted to an IAM role that the IAM user can assume when accessing AWS resources.
+- Seqera Platform needs an IAM User to obtain details about the EKS cluster, and to fetch log files from the S3 bucket, if one is used as the work directory. This user must have the permissions detailed in the [AWS IAM policies](/platform-cloud/integrations/cloud-providers/aws/iam-policies) reference (select the **Amazon EKS** tab). Optionally, permissions can instead be granted to an IAM role that the IAM user can assume when accessing AWS resources.
-- To use Fusion (recommended) to access data hosted on S3, including writing files to the Nextflow work directory, you need an IAM role that allows the EKS Service Account that Seqera pods use to interact with AWS resources. Refer to section [Configure EKS Service Account IAM role for Fusion v2](#configure-eks-service-account-iam-role-for-fusion-v2) for details.
+- To use Fusion (recommended) to access data hosted on S3, including writing files to the Nextflow work directory, you need an IAM role that allows the EKS Service Account that Seqera pods use to interact with AWS resources. Refer to [Configure EKS Service Account IAM role for Fusion v2](/platform-cloud/integrations/cloud-providers/aws/eks-additions#configure-eks-service-account-iam-role-for-fusion-v2) for details.
Create a separate IAM role from the optional one assumed by the IAM user to separate the permissions needed by the EKS Service Account from those needed by the IAM user. If you plan to use legacy storage instead of Fusion, you can skip this step.
:::tip
-Seqera Platform assumes an EKS cluster already exists. Follow the [cluster preparation](./k8s) instructions to create the resources required by Seqera. Some administrative privileges are also needed to allow the IAM User to access the cluster, as detailed in the [EKS access](#allow-an-iam-user-or-role-access-to-eks) section.
+Seqera Platform assumes an EKS cluster already exists. Follow the [cluster preparation](./k8s) instructions to create the resources required by Seqera. Some administrative privileges are also needed to allow the IAM User to access the cluster, as detailed in the [EKS access](/platform-cloud/integrations/cloud-providers/aws/eks-additions#allow-an-iam-user-or-role-access-to-eks) section.
:::
Once you meet all the prerequisites, configure an [Amazon EKS Compute Environment](#amazon-eks-compute-environment) in Seqera.
-## Required Platform IAM permissions
-
-Seqera Platform requires an IAM user with specific permissions to launch pipelines, explore buckets with Data Explorer, and run Studio sessions on the AWS EKS compute environment. Some permissions are mandatory for the compute environment to function correctly, while others are optional and enable features like populating dropdown lists in the Platform UI.
-
-Attach permissions directly to an [IAM user](#iam-user-creation), or to an [IAM role](#iam-role-creation-optional) that the IAM user can assume.
-
-A permissive and broad policy with all the required permissions is provided here for a quick start. However, we recommend following the principle of least privilege and only granting the necessary permissions for your use case, as shown in the following sections.
-
-
-Full permissive policy (for reference)
-
-```json
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Sid": "EKSClusterAccessCanBeRestricted",
- "Effect": "Allow",
- "Action": [
- "eks:DescribeCluster",
- "eks:ListClusters"
- ],
- "Resource": "*"
- },
- {
- "Sid": "OptionalS3PlatformDataAccessCanBeRestricted",
- "Effect": "Allow",
- "Action": [
- "s3:Get*",
- "s3:List*",
- "s3:PutObject",
- "s3:PutObjectTagging",
- "s3:DeleteObject"
- ],
- "Resource": "*"
- }
- ]
-}
-```
-
-
-
-### EKS cluster access
-
-Seqera needs permissions to list EKS clusters in the selected region and to describe the selected cluster to retrieve its connection details. The `eks:ListClusters` action cannot be restricted to specific resources, but the `eks:DescribeCluster` action can be restricted to the specific cluster used as compute environment.
-
-```json
-{
- "Sid": "EKSClusterListing",
- "Effect": "Allow",
- "Action": [
- "eks:ListClusters"
- ],
- "Resource": "*"
-},
-{
- "Sid": "EKSClusterDescription",
- "Effect": "Allow",
- "Action": [
- "eks:DescribeCluster"
- ],
- "Resource": "arn:aws:eks:::cluster/"
-}
-```
-
-No other permissions are required for the IAM user to launch pipelines on the EKS compute environment, as the Service Account created in the [cluster preparation](./k8s) phase performs the actual management of pods and resources, which the IAM user can access via EKS authentication, detailed [in the EKS access section below](#allow-an-iam-user-or-role-access-to-eks).
-
-### S3 access (optional)
-
-Seqera automatically attempts to fetch a list of S3 buckets available in the AWS account connected to Platform, to provide them in a drop-down menu to be used as Nextflow working directory, and make the compute environment creation smoother. This feature is optional, and users can type the bucket name manually when setting up a compute environment. To allow Seqera to fetch the list of buckets in the account, the `s3:ListAllMyBuckets` action can be added, and it must have the `Resource` field set to `*`.
-
-Seqera offers several products to manipulate data on AWS S3 buckets, such as [Studios](../studios/overview) and [Data Explorer](../data/data-explorer); if these features are not needed the related permissions can be omitted.
-
-The IAM policy can be scoped down to only allow limited read/write permissions in certain S3 buckets used by Studios/Data Explorer. In addition, the policy must include permission to check the region and list the content of the S3 bucket used as Nextflow work directory. We also recommend granting the `s3:GetObject` permission on the work directory path to fetch Nextflow log files.
-
-:::note
-If you opted to create a separate S3 bucket only for Nextflow work directories, the IAM user or the Role it assumes only need read access to it. The IAM role used by the EKS Service Account (detailed in the [separate section](#configure-eks-service-account-iam-role-for-fusion-v2)) must have full read/write access to the work directory bucket to allow Fusion to operate correctly.
-:::
-
-```json
-{
- "Sid": "S3CheckBucketWorkDirectory",
- "Effect": "Allow",
- "Action": [
- "s3:ListBucket"
- ],
- "Resource": [
- "arn:aws:s3:::example-bucket-used-as-work-directory"
- ]
-},
-{
- "Sid": "S3ReadOnlyNextflowLogFiles",
- "Effect": "Allow",
- "Action": [
- "s3:GetObject"
- ],
- "Resource": [
- "arn:aws:s3:::example-bucket-used-as-work-directory/path/to/work/directory/*"
- ]
-},
-{
- "Sid": "S3ReadWriteBucketsForStudiosDataExplorer",
- "Effect": "Allow",
- "Action": [
- "s3:Get*",
- "s3:List*",
- "s3:PutObject"
- ],
- "Resource": [
- "arn:aws:s3:::example-bucket-read-write-studios",
- "arn:aws:s3:::example-bucket-read-write-studios/*",
- "arn:aws:s3:::example-bucket-read-write-data-explorer",
- "arn:aws:s3:::example-bucket-read-write-data-explorer/*"
- ]
-}
-```
-
-## Create the IAM policy
-
-The policy above must be created in the AWS account where the EKS and S3 resources are located.
-
-1. Open the [AWS IAM console](https://console.aws.amazon.com/iam).
-1. From the left navigation menu, select **Policies** under **Access management**.
-1. Select **Create policy**.
-1. On the **Policy editor** section, select the **JSON** tab.
-1. Following the instructions detailed in the [IAM permissions breakdown section](#required-platform-iam-permissions) replace the default text in the policy editor area under the **JSON** tab with a policy adapted to your use case, then select **Next**.
-1. Enter a name and description for the policy on the **Review and create** page, then select **Create policy**.
-
-## IAM user creation
-
-Seqera requires an Identity and Access Management (IAM) User to describe EKS clusters and S3 buckets in your AWS account. We recommend creating a separate IAM policy rather an IAM User inline policy, as the latter only allows 2048 characters, which may not be sufficient for all the required permissions.
-
-In certain scenarios, for example when multiple users need to access the same AWS account, an IAM role with the required permissions can be created instead, and the IAM user allowed to assume the role, as detailed in the [IAM role creation (optional)](#iam-role-creation-optional) section.
-
-### Create an IAM user
-
-1. From the [AWS IAM console](https://console.aws.amazon.com/iam), select **Users** in the left navigation menu, then select **Create User** at the top right of the page.
-1. Enter a name for your user (e.g., _seqera_) and select **Next**.
-1. Under **Permission options**, select **Attach policies directly**, then search for and select the policy created above, and select **Next**.
- * If you instead prefer to make the IAM user assume a role to manage AWS resources (see the [IAM role creation (optional)](#iam-role-creation-optional) section), create a policy with the following content (edit the AWS principal with the ARN of the role created) and attach it to the IAM user:
-
- ```json
- {
- "Sid": "AssumeRole",
- "Effect": "Allow",
- "Action": "sts:AssumeRole",
- "Resource": "arn:aws:iam:::role/",
- "Condition": {
- "StringEquals": {
- "sts:ExternalId": ""
- }
- }
- }
- ```
-1. On the last page, review the user details and select **Create user**.
-
-The user has now been created. For more details see the [AWS documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html).
-
-### Obtain IAM user credentials
-
-To get the credentials needed to connect Seqera to your AWS account, follow these steps:
-
-1. From the [AWS IAM console](https://console.aws.amazon.com/iam), select **Users** in the left navigation menu, then select the newly created user from the users table.
-1. Select the **Security credentials** tab, then select **Create access key** under the **Access keys** section.
-1. In the **Use case** dialog that appears, select **Command line interface (CLI)**, then tick the confirmation checkbox at the bottom to acknowledge that you want to proceed creating an access key, and select **Next**.
-1. Optionally provide a description for the access key, like the reason for creating it, then select **Create access key**.
-1. Save the **Access key** and **Secret access key** in a secure location as they are needed when configuring credentials in Seqera.
-
-## IAM role creation (optional)
-
-Rather than attaching permissions directly to the IAM user, you can create an IAM role with the required permissions and allow the IAM user to assume that role when accessing AWS resources. This is useful when multiple IAM users are used to access the same AWS account: this way the actual permissions to operate on the resources are only granted to a single centralized role.
-
-1. From the [AWS IAM console](https://console.aws.amazon.com/iam), select **Roles** in the left navigation menu, then select **Create role** at the top right of the page.
-1. Select **Custom trust policy** as the type of trusted entity, provide the following policy and edit the AWS principal with the ARN of the IAM user created in the [IAM user creation](#iam-user-creation) section, then select **Next**.
-
- ```json
- {
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Principal": {
- "AWS": [
- "arn:aws:iam:::user/"
- ]
- },
- "Action": "sts:AssumeRole",
- "Condition": {
- "StringEquals": {
- "sts:ExternalId": ""
- }
- }
- },
- {
- "Effect": "Allow",
- "Principal": {
- "AWS": [
- "arn:aws:iam:::user/"
- ]
- },
- "Action": "sts:TagSession"
- }
- ]
- }
- ```
-
-1. On the **Permissions** page, search for and select the policy created in the [IAM user creation](#iam-user-creation) section, then select **Next**.
-1. Give the role a name and optionally a description, review the details of the role, optionally provide tags to help you identify the role, then select **Create role**.
-
-Multiple users can be specified in the trust policy by adding more ARNs to the `Principal` section.
-
-:::note
-Seqera Platform generates the `External ID` value during AWS credential creation. For role-based credentials, use this exact value in your IAM trust policy (`sts:ExternalId`).
-:::
-
-### Role-based trust policy example (Seqera Cloud)
-
-For role-based AWS credentials in Seqera Cloud, allow the Seqera Cloud access role `arn:aws:iam::161471496260:role/SeqeraPlatformCloudAccessRole` in your trust policy and enforce the `External ID` generated during credential creation:
-
-```json
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Principal": {
- "AWS": "arn:aws:iam::161471496260:role/SeqeraPlatformCloudAccessRole"
- },
- "Action": "sts:AssumeRole",
- "Condition": {
- "StringEquals": {
- "sts:ExternalId": ""
- }
- }
- },
- {
- "Effect": "Allow",
- "Principal": {
- "AWS": "arn:aws:iam::161471496260:role/SeqeraPlatformCloudAccessRole"
- },
- "Action": "sts:TagSession"
- }
- ]
-}
-```
-
-## AWS credential options
-
-AWS credentials can be configured in two ways:
-
-- **Key-based credentials**: Access key and secret key with direct IAM permissions. If you provide a role ARN in **Assume role**, the **Generate External ID** switch is displayed and External ID generation is optional.
-- **Role-based credentials (recommended)**: Use role assumption only (no static keys). Paste the IAM role ARN which Seqera must use for accessing your AWS resources in **Assume role**. External ID is generated automatically when you save.
-
-Use the IAM role ARN which Seqera must use for accessing your AWS resources in **Assume role**. This field is available for both key-based and role-based credentials. It is optional for key-based credentials and required for role-based credentials.
-
-Existing credentials created before March 2026 continue to work without changes.
-
-## Configure EKS Service Account IAM role for Fusion v2
-
-To use [Fusion v2](https://docs.seqera.io/fusion) in your Amazon EKS compute environment, an AWS S3 bucket must be used as work directory and both the head and compute Service Accounts (if separate) must have access to the S3 bucket specified as the work directory.
-
-If you do not plan to use Fusion in favor of legacy storage, you can skip this section.
-
-1. Create an IAM role with the following permissions:
-
- ```json
- {
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Action": [
- "s3:ListBucket"
- ],
- "Resource": [
- "arn:aws:s3:::"
- ]
- },
- {
- "Action": [
- "s3:GetObject",
- "s3:PutObject",
- "s3:PutObjectTagging",
- "s3:DeleteObject"
- ],
- "Resource": [
- "arn:aws:s3:::/*"
- ],
- "Effect": "Allow"
- }
- ]
- }
- ```
-
- Replace `` with the bucket name used as work directory.
-
-1. The IAM role must also have a trust relationship with the Kubernetes service account (or accounts) that Seqera uses to manage the EKS cluster, which is `tower-launcher-sa` in the default configuration.:
-
- ```json
- {
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Principal": {
- "Federated": "arn:aws:iam:::oidc-provider/oidc.eks..amazonaws.com/id/"
- },
- "Action": "sts:AssumeRoleWithWebIdentity",
- "Condition": {
- "StringEquals": {
- "oidc.eks..amazonaws.com/id/:aud": "sts.amazonaws.com",
- "oidc.eks..amazonaws.com/id/:sub": "system:serviceaccount::"
- }
- }
- }
- ]
- }
- ```
-
- Replace ``, ``, ``, ``, `` with the corresponding values.
-
-1. Annotate the Kubernetes Service Account with the IAM role:
-
- ```shell
- kubectl annotate serviceaccount --namespace eks.amazonaws.com/role-arn=arn:aws:iam:::role/
- ```
-
- Replace `` (by default `tower-launcher-sa`, as created in the [cluster preparation guide](./k8s)), ``, and `` with the corresponding values previously defined.
-
-This will allow pods using that service account to assume the IAM role and access the S3 bucket specified as work directory.
-See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html) for further details.
-
-## Allow an IAM User or Role access to EKS
-
-Configure the EKS cluster to allow the IAM user (or the IAM role it assumes) to access the cluster and manage pods.
-
-1. Retrieve from the [AWS IAM console](https://console.aws.amazon.com/iam) the ARN of the [IAM User](#iam-user-creation) or [IAM Role](#iam-role-creation-optional) previously created.
-
- :::note
- The AWS credentials for the IAM user will be used in the Seqera compute environment configuration.
- :::
-
-1. Modify the EKS aws-auth ConfigMap to allow the IAM User to access the cluster and manage pods. This step may require cluster administrator privileges:
+## Before you start
- ```bash
- kubectl edit configmap -n kube-system aws-auth
- ```
+Set up the [AWS integration](/platform-cloud/integrations/cloud-providers/aws/overview) before creating an Amazon EKS compute environment in Seqera:
-1. In the editor that opens, edit the `mapUsers` section to add the following entry, replacing `` with the user ARN retrieved from the AWS IAM console:
+- [AWS IAM policies](/platform-cloud/integrations/cloud-providers/aws/iam-policies) — required permissions (select the **Amazon EKS** tab).
+- [AWS credentials](/platform-cloud/integrations/cloud-providers/aws/credentials) — IAM policy, IAM user, IAM role, and how to add credentials in Seqera.
+- [EKS additions](/platform-cloud/integrations/cloud-providers/aws/eks-additions) — Service Account IAM role for Fusion v2 and `aws-auth` ConfigMap setup.
- ```yaml
- mapUsers: |
- - userarn:
- username: tower-launcher-user
- groups:
- - tower-launcher-role
- ```
-
- Alternatively, an IAM role can be allowed to authenticate to the cluster: in this case, the role ARN must be specified in the **Assume role** field when configuring the Seqera compute environment (step 9 in the [Amazon EKS compute environment](#amazon-eks-compute-environment) section), the role must have a trust relationship with the Seqera IAM user, and the role `` must be added to the `mapRoles` section of the EKS auth configuration instead:
-
- ```yaml
- mapRoles: |
- - rolearn:
- username: tower-launcher-role
- groups:
- - tower-launcher-role
- ```
+Cluster preparation steps that are specific to running pods in EKS (rather than to Seqera's authentication) remain in the [Kubernetes cluster guide](/platform-cloud/compute-envs/k8s).
- See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html) for more details on modifying the aws-auth ConfigMap of an EKS cluster.
+{/* Anchor stubs preserved for backwards compatibility with deep links from older content. */}
+
+
+
+
+
+
+
+
## Amazon EKS compute environment
@@ -401,11 +51,11 @@ Once all prerequisites are met, create a Seqera EKS compute environment:
1. Select **Compute environments** from the navigation menu of the Seqera Workspace where you want to setup the CE.
1. Enter a descriptive name for this environment, e.g., `Amazon EKS (eu-west-1)`.
1. Select **Amazon EKS** as the target platform.
-1. Under **Storage**, select either **Fusion storage** (recommended) or **Legacy storage**. The [Fusion v2](https://docs.seqera.io/fusion) virtual distributed file system allows access to your AWS S3-hosted data (`s3://` URLs). This eliminates the need to configure a shared file system in your Kubernetes cluster. See [Configure EKS Service Account IAM role for Fusion v2](#configure-eks-service-account-iam-role-for-fusion-v2) below.
+1. Under **Storage**, select either **Fusion storage** (recommended) or **Legacy storage**. The [Fusion v2](https://docs.seqera.io/fusion) virtual distributed file system allows access to your AWS S3-hosted data (`s3://` URLs). This eliminates the need to configure a shared file system in your Kubernetes cluster. See [Configure EKS Service Account IAM role for Fusion v2](/platform-cloud/integrations/cloud-providers/aws/eks-additions#configure-eks-service-account-iam-role-for-fusion-v2) below.
1. From the **Credentials** drop-down, select existing AWS credentials, or select **+** to add new credentials. If you're using existing credentials, skip to step 9. The user must have the IAM permissions required to describe and list EKS clusters, per Service Account role requirements.
:::note
- You can create multiple credentials in your Seqera environment. See [Credentials](../credentials/overview).
+ You can create multiple credentials in your Seqera environment. See [Credentials](/platform-cloud/integrations/overview).
:::
1. Enter a name, e.g., `EKS Credentials`.
diff --git a/platform-cloud/docs/compute-envs/gke.md b/platform-cloud/docs/compute-envs/gke.md
index c4c2b805e..94131d11e 100644
--- a/platform-cloud/docs/compute-envs/gke.md
+++ b/platform-cloud/docs/compute-envs/gke.md
@@ -11,7 +11,7 @@ Seqera Platform offers native support for GKE clusters to streamline the deploym
## Requirements
-See [here](../compute-envs/google-cloud-batch#configure-google-cloud) for instructions to set up your Google Cloud account and other services (such as Cloud storage).
+See [Google Cloud integration](/platform-cloud/integrations/cloud-providers/google/overview) for instructions to set up your Google Cloud account and other services (such as Cloud storage).
You must have a GKE cluster up and running. Follow the [cluster preparation](../compute-envs/k8s#cluster-preparation) instructions to create the resources required by Seqera. In addition to the generic Kubernetes instructions, you must make a number of modifications specific to GKE.
@@ -58,7 +58,7 @@ After you've prepared your Kubernetes cluster and granted cluster access to your
1. Enter a name for the credentials, e.g., _GKE Credentials_.
1. Enter the **Service account key** for your Google service account.
:::tip
- You can create multiple credentials in your Seqera environment. See [Credentials](../credentials/overview).
+ You can create multiple credentials in your Seqera environment. See [Credentials](/platform-cloud/integrations/overview).
:::
1. Select the **Location** of your GKE cluster.
:::caution
diff --git a/platform-cloud/docs/compute-envs/google-cloud-batch.md b/platform-cloud/docs/compute-envs/google-cloud-batch.md
index cebd39709..5aa4d7ec5 100644
--- a/platform-cloud/docs/compute-envs/google-cloud-batch.md
+++ b/platform-cloud/docs/compute-envs/google-cloud-batch.md
@@ -14,114 +14,23 @@ The guide is split into two parts:
1. How to configure your Google Cloud account to use the Batch API.
2. How to create a Google Cloud Batch compute environment in Seqera.
-## Configure Google Cloud
-
-### Create a project
-
-Go to the [Google Project Selector page](https://console.cloud.google.com/projectselector2) and select an existing project, or select **Create project**.
-
-Enter a name for your new project, e.g., _tower-nf_.
-
-If you are part of an organization, the location will default to your organization.
-
-### Enable billing
-
-See [here](https://cloud.google.com/billing/docs/how-to/modify-project) to enable billing in your Google Cloud account.
-
-### Enable APIs
-
-See [here](https://console.cloud.google.com/flows/enableapi?apiid=batch.googleapis.com%2Ccompute.googleapis.com%2Cstorage-api.googleapis.com) to enable the following APIs for your project:
-
-- Batch API
-- Compute Engine API
-- Cloud Storage API
-
-Select your project from the dropdown menu and select **Enable**.
-
-Alternatively, you can enable each API manually by selecting your project in the navigation bar and visiting each API page:
-
-- [Batch API](https://console.cloud.google.com/marketplace/product/google/batch.googleapis.com)
-
-- [Compute Engine API](https://console.cloud.google.com/marketplace/product/google/compute.googleapis.com)
-
-- [Cloud Storage API](https://console.cloud.google.com/marketplace/product/google/storage-api.googleapis.com)
-
-### IAM
-
-Seqera requires a service account with appropriate permissions to interact with your Google Cloud resources. As an IAM user, you must have access to the service account that submits Batch jobs.
-
-:::caution
-By default, Google Cloud Batch uses the default Compute Engine service account to submit jobs. This service account is granted the Editor (`roles/Editor`) role. While this service account has the necessary permissions needed by Seqera, this role is not recommended for production environments. Control job access using a custom service account with only the permissions necessary for Seqera to execute Batch jobs instead.
-:::
-
-#### Service account permissions
-
-[Create a custom service account][create-sa] with at least the following permissions:
-
-- Batch Agent Reporter (`roles/batch.agentReporter`) on the project
-- Batch Job Editor (`roles/batch.jobsEditor`) on the project
-- Logs Writer (`roles/logging.logWriter`) on the project (to let jobs generate logs in Cloud Logging)
-- Logs Viewer (`roles/logging.logViewer`) on the project (to view and retrieve logs from Cloud Logging)
-- Service Account User (`roles/iam.serviceAccountUser`)
-
-If your Google Cloud project does not require access restrictions on any of its Cloud Storage buckets, you can grant project Storage Admin (`roles/storage.admin`) permissions to your service account to simplify setup. To grant access only to specific buckets, add the service account as a principal on each bucket individually. See [Cloud Storage bucket](#cloud-storage-bucket) below.
-
-#### User permissions
-
-Ask your Google Cloud administrator to grant you the following IAM user permissions to interact with your custom service account:
-
-- Batch Job Editor (`roles/batch.jobsEditor`) on the project
-- Service Account User (`roles/iam.serviceAccountUser`) on the job's service account (default: Compute Engine service account)
-- View Service Accounts (`roles/iam.serviceAccountViewer`) on the project
-
-To configure a credential in Seqera, you must first create a [service account JSON key file][get-json]:
-
-1. In the Google Cloud navigation menu, select **IAM & Admin > Service Accounts**.
-2. Select the email address of the service account.
-
- :::note
- The Compute Engine default service account is not recommended for production environments due to its powerful permissions. To use a service account other than the Compute Engine default, specify the service account email address under **Advanced options** on the Seqera compute environment creation form.
- :::
-
-3. Select **Keys > Add key > Create new key**.
-4. Select **JSON** as the key type.
-5. Select **Create**.
-
-A JSON file is downloaded to your computer. This file contains the credential needed to configure the compute environment in Seqera.
-
-You can manage your key from the **Service Accounts** page.
-
-### Cloud Storage bucket
-
-Google Cloud Storage is a type of **object storage**. To access files and store the results for your pipelines, create a **Cloud bucket** that your Seqera service account can access.
-
-#### Create a Cloud Storage bucket
-
-1. In the hamburger menu (**≡**), select **Cloud Storage**.
-2. From the **Buckets** tab, select **Create**.
-3. Enter a name for your bucket. You will reference this name when you create the compute environment in Seqera.
-4. Select **Region** for the **Location type** and select the **Location** for your bucket. You'll reference this location when you create the compute environment in Seqera.
-
- :::note
- The Batch API is available in a limited number of [locations][batch-locations]. These locations are only used to store metadata about the pipeline operations. The storage bucket and compute resources can be in any region.
- :::
-
-5. Select **Standard** for the default storage class.
-6. To restrict public access to your bucket data, select the **Enforce public access prevention on this bucket** checkbox.
-7. Under **Access control**, select **Uniform**.
-8. Select any additional object data protection tools, per your organization's data protection requirements.
-9. Select **Create**.
-
-#### Assign bucket permissions
-
-1. After the bucket is created, you are redirected to the **Bucket details** page.
-2. Select **Permissions**, then **Grant access** under **View by principals**.
-3. Copy the email address of your service account into **New principals**.
-4. Select the **Storage Admin** role, then select **Save**.
-
-:::tip
-You've created a project, enabled the necessary Google APIs, created a bucket, and created a service account JSON key file with the required credentials. You now have what you need to set up a new compute environment in Seqera.
-:::
+## Before you start
+
+Set up the [Google Cloud integration](/platform-cloud/integrations/cloud-providers/google/overview) before creating a Google Cloud Batch compute environment in Seqera:
+
+- [Google Cloud IAM](/platform-cloud/integrations/cloud-providers/google/iam) — project, billing, APIs, and service account permissions.
+- [Google Cloud credentials](/platform-cloud/integrations/cloud-providers/google/credentials) — generate the service account JSON key and add credentials in Seqera.
+- [Storage access](/platform-cloud/integrations/cloud-providers/google/storage-access) — Cloud Storage bucket creation and permissions.
+
+{/* Anchor stubs preserved for backwards compatibility with deep links from older content. */}
+
+
+
+
+
+
+
+
### Seqera compute environment
diff --git a/platform-cloud/docs/compute-envs/google-cloud.md b/platform-cloud/docs/compute-envs/google-cloud.md
index e9de8cc10..2fd6fefc9 100644
--- a/platform-cloud/docs/compute-envs/google-cloud.md
+++ b/platform-cloud/docs/compute-envs/google-cloud.md
@@ -79,17 +79,10 @@ To create and launch pipelines or Studio sessions with this compute environment
### Required permissions
-#### Service account permissions
+{/* Anchor stub preserved for backwards compatibility with deep links. */}
+
-[Create a custom service account](https://cloud.google.com/iam/docs/service-accounts-create#creating) with at least the following permissions:
-
-- Compute instance admin (`roles/compute.instanceAdmin.v1`)
-- Project IAM admin (`roles/resourcemanager.projectIamAdmin`)
-- Service Account Admin (`roles/iam.serviceAccountAdmin`)
-- Service Account User (`roles/iam.serviceAccountUser`)
-- Service Usage Consumer (`roles/serviceusage.serviceUsageConsumer`)
-
-If your Google Cloud project does not require access restrictions on any of its Cloud Storage buckets, you can grant project Storage Admin (`roles/storage.admin`) permissions to your service account to simplify setup. To grant access only to specific buckets, add the service account as a principal [on each bucket individually](https://docs.seqera.io/platform-cloud/compute-envs/google-cloud-batch#cloud-storage-bucket). For each Google Cloud compute environment created in the Seqera platform, a separate service account is created with the necessary permissions to launch pipelines/studios.
+Service account permissions for the Google Cloud compute environment are documented in [Google Cloud IAM](/platform-cloud/integrations/cloud-providers/google/iam). Generate a service account JSON key by following the [Google Cloud credentials](/platform-cloud/integrations/cloud-providers/google/credentials) instructions.
## Advanced options
diff --git a/platform-cloud/docs/compute-envs/hpc.md b/platform-cloud/docs/compute-envs/hpc.md
index c69182557..10dfb4f2f 100644
--- a/platform-cloud/docs/compute-envs/hpc.md
+++ b/platform-cloud/docs/compute-envs/hpc.md
@@ -23,9 +23,9 @@ To launch pipelines into an **HPC** cluster from Seqera, the following requireme
## Credentials
-Seqera requires SSH access to your HPC cluster to run pipelines. Use [managed identities](../credentials/managed_identities) to enable granular access control and preserve individual cluster user identities.
+Seqera requires SSH access to your HPC cluster to run pipelines. Use [managed identities](/platform-cloud/integrations/identity/managed-identities) to enable granular access control and preserve individual cluster user identities.
-You can also use workspace [SSH credentials](../credentials/ssh_credentials) for cluster login, but this provides service account access to your HPC to all Platform users. This means that all users will be granted the same file system access, and all activity is logged under the same user account on your HPC cluster.
+You can also use workspace [SSH credentials](/platform-cloud/integrations/identity/ssh-credentials) for cluster login, but this provides service account access to your HPC to all Platform users. This means that all users will be granted the same file system access, and all activity is logged under the same user account on your HPC cluster.
For HPC clusters that do not allow direct access through an SSH client, a secure connection can be authenticated with [Tower Agent](../supported_software/agent/overview).
diff --git a/platform-cloud/docs/compute-envs/k8s.md b/platform-cloud/docs/compute-envs/k8s.md
index a5c1a9b2a..abab6a132 100644
--- a/platform-cloud/docs/compute-envs/k8s.md
+++ b/platform-cloud/docs/compute-envs/k8s.md
@@ -82,7 +82,7 @@ After you've prepared your Kubernetes cluster for Seqera integration, create a c
1. From the **Credentials** drop-down, select existing Kubernetes credentials, or select **+** to add new credentials. If you choose to use existing credentials, skip to step 7.
:::tip
- You can create multiple credentials in your Seqera workspace. See [Credentials](../credentials/overview).
+ You can create multiple credentials in your Seqera workspace. See [Credentials](/platform-cloud/integrations/overview).
:::
1. Enter a name, such as _K8s Credentials_.
diff --git a/platform-cloud/docs/credentials/container_registry_credentials.md b/platform-cloud/docs/credentials/container_registry_credentials.md
deleted file mode 100644
index a6323c4df..000000000
--- a/platform-cloud/docs/credentials/container_registry_credentials.md
+++ /dev/null
@@ -1,32 +0,0 @@
----
-title: "Container registry credentials"
-description: "Configure container registry credentials for the Wave container service in Seqera Platform."
-date created: "2025-10-21"
-last updated: "2025-10-21"
-tags: [credentials, container-registry, wave, docker, authentication]
----
-
-Seqera Platform supports the configuration of credentials for the Wave container service to authenticate to private and public container registries. For more information about Wave, see [Wave containers](https://docs.seqera.io/wave).
-
-:::note
-Container registry credentials are only used by Wave containers. Enable Wave when you create a [compute environment](../compute-envs/overview) in Seqera, or add `wave { enabled=true }` to your `nextflow.config` file, for your pipeline execution to use Wave containers.
-:::
-
-## Supported container registries
-
-Seqera supports credential configuration for the following container registries. Select a registry below for detailed instructions on how to create and configure credentials for that provider:
-
-- [AWS ECR credentials](./aws_registry_credentials.md)
-- [Azure container registry credentials](./azure_registry_credentials.md)
-- [Docker Hub credentials](./docker_hub_registry_credentials.md)
-- [Gitea container registry credentials](./gitea_registry_credentials.md)
-- [GitHub container registry credentials](./github_registry_credentials.md)
-- [GitLab container registry credentials](./gitlab_registry_credentials.md)
-- [Google registry credentials](./google_registry_credentials.md)
-- [Quay container registry credentials](./quay_registry_credentials.md)
-
-## Next steps
-
-- Learn more about [Wave containers](https://docs.seqera.io/wave/provisioning).
-- Configure [compute environment credentials](../compute-envs/overview.md) for your pipeline infrastructure.
-- Set up [data repository credentials](./data_repositories.md) to access cloud storage.
diff --git a/platform-cloud/docs/credentials/overview.md b/platform-cloud/docs/credentials/overview.md
deleted file mode 100644
index 39384058a..000000000
--- a/platform-cloud/docs/credentials/overview.md
+++ /dev/null
@@ -1,27 +0,0 @@
----
-title: "Credentials overview"
-description: "Learn about credentials in Seqera Platform."
-date created: "2023-04-21"
-last updated: "2025-10-21"
-tags: [credentials, git, container, registry,]
----
-
-Seqera Platform supports secure credential management for all your infrastructure and service integrations. Configure credentials to authenticate with:
-
-- [Git hosting services][git]: Access private repositories from GitHub, GitLab, Bitbucket, and other Git providers.
-- [Container registries][registry]: Authenticate the Wave container service with private registries like Docker Hub, AWS ECR, Azure Container Registry, and Google Artifact Registry.
-- [Data repositories][data]: Connect to cloud storage services like AWS S3, Azure Blob Storage, and Google Cloud Storage.
-- [Managed identities][managed]: Use cloud provider managed identities for secure, credential-free authentication.
-- [SSH credentials][ssh]: Connect to HPC and on-premises compute environments.
-- [Agent credentials][agent]: Authenticate Seqera Agents for hybrid and on-premises deployments.
-
-:::note
-Seqera Platform encrypts all credentials with AES-256 encryption before storing them. No Seqera API exposes credentials in an unencrypted way.
-:::
-
-[git]: ../git/overview
-[registry]: ./container_registry_credentials
-[data]: ./data_repositories
-[managed]: ./managed_identities
-[ssh]: ./ssh_credentials
-[agent]: ./agent_credentials
diff --git a/platform-cloud/docs/data/data-explorer.md b/platform-cloud/docs/data/data-explorer.md
index d4d4cb945..f32ba08b4 100644
--- a/platform-cloud/docs/data/data-explorer.md
+++ b/platform-cloud/docs/data/data-explorer.md
@@ -22,7 +22,7 @@ Data Explorer lists public and private data repositories. Repositories accessibl
- **Retrieve data repositories with workspace credentials**
- Private data repositories accessible to the credentials defined in your workspace are listed in Data Explorer automatically. The permissions required for your [AWS](../compute-envs/aws-batch#required-platform-iam-permissions), [Google Cloud](../compute-envs/google-cloud-batch#iam), [Azure Batch](../compute-envs/azure-batch#storage-account), or Amazon S3-compatible API storage: credentials allow full Data Explorer functionality.
+ Private data repositories accessible to the credentials defined in your workspace are listed in Data Explorer automatically. The permissions required for your [AWS](/platform-cloud/integrations/cloud-providers/aws/iam-policies), [Google Cloud](/platform-cloud/integrations/cloud-providers/google/iam), [Azure](/platform-cloud/integrations/cloud-providers/azure/storage-access), or Amazon S3-compatible API storage credentials allow full Data Explorer functionality.
- **Configure individual data repositories manually**
diff --git a/platform-cloud/docs/getting-started/overview.md b/platform-cloud/docs/getting-started/overview.md
index b0aac9426..ddd01267f 100644
--- a/platform-cloud/docs/getting-started/overview.md
+++ b/platform-cloud/docs/getting-started/overview.md
@@ -18,7 +18,7 @@ The Community Showcase [Launchpad](../launch/launchpad) is an example workspace
- [Datasets](../data/datasets) are collections of versioned, structured data (usually in the form of a samplesheet) in CSV or TSV format. A dataset is used as the input for a pipeline run. Sample datasets are used in pipelines with the same name, e.g., the *nf-core-rnaseq-test* dataset is used as input when you run the *nf-core-rnaseq* pipeline.
- [Compute environments](../compute-envs/overview) are the platforms where workflows are executed. A compute environment consists of access credentials, configuration settings, and storage options for the environment.
-- [Credentials](../credentials/overview) are the authentication keys Seqera uses to access compute environments, private code repositories, and external services. Credentials are SHA-256 encrypted before secure storage. The Community Showcase includes all the credentials you need to run pipelines in the included AWS Batch compute environments.
+- [Credentials](/platform-cloud/integrations/overview) are the authentication keys Seqera uses to access compute environments, private code repositories, and external services. Credentials are AES-256 encrypted before secure storage. The Community Showcase includes all the credentials you need to run pipelines in the included AWS Batch compute environments.
- [Secrets](../secrets/overview) are retrieved and used during pipeline execution. In your private or organization workspace, you can store the access keys, licenses, or passwords required for your pipeline execution to interact with third-party services. The secrets included in the Community Showcase contain license keys to run *nf-dragen* and *nf-sentieon* pipelines in the Showcase compute environments.
## Run a pipeline with sample data
diff --git a/platform-cloud/docs/getting-started/proteinfold.md b/platform-cloud/docs/getting-started/proteinfold.md
index fde0f4455..80b02b666 100644
--- a/platform-cloud/docs/getting-started/proteinfold.md
+++ b/platform-cloud/docs/getting-started/proteinfold.md
@@ -19,7 +19,7 @@ You will need the following to get started:
- [Admin](../orgs-and-teams/roles) permissions in an existing organization workspace. See [Set up your workspace](./workspace-setup) to create an organization and workspace from scratch.
- An existing AWS cloud account with access to the AWS Batch service.
-- Existing access credentials with permissions to create and manage resources in your AWS account. See [IAM](../compute-envs/aws-batch#required-platform-iam-permissions) for guidance to set up IAM permissions for Platform.
+- Existing access credentials with permissions to create and manage resources in your AWS account. See [AWS IAM policies](/platform-cloud/integrations/cloud-providers/aws/iam-policies) for guidance to set up IAM permissions for Platform.
:::
## Compute environment
diff --git a/platform-cloud/docs/getting-started/quickstart-demo/add-data.md b/platform-cloud/docs/getting-started/quickstart-demo/add-data.md
index 0482c33d5..ad90f7776 100644
--- a/platform-cloud/docs/getting-started/quickstart-demo/add-data.md
+++ b/platform-cloud/docs/getting-started/quickstart-demo/add-data.md
@@ -17,7 +17,7 @@ For pipeline runs in the cloud, users typically need access to buckets or blob s
### Add a cloud bucket
-Private cloud storage buckets accessible by the [credentials](../../credentials/overview) in your workspace are added to Data Explorer automatically by default. However, you can also add custom directory paths within buckets to your workspace to simplify direct access.
+Private cloud storage buckets accessible by the [credentials](/platform-cloud/integrations/overview) in your workspace are added to Data Explorer automatically by default. However, you can also add custom directory paths within buckets to your workspace to simplify direct access.
To add individual buckets (or directory paths within buckets):
diff --git a/platform-cloud/docs/getting-started/rnaseq.md b/platform-cloud/docs/getting-started/rnaseq.md
index 9bcc0d452..84db92b14 100644
--- a/platform-cloud/docs/getting-started/rnaseq.md
+++ b/platform-cloud/docs/getting-started/rnaseq.md
@@ -21,7 +21,7 @@ You will need the following to get started:
- [Admin](../orgs-and-teams/roles) permissions in an existing organization workspace. See [Set up your workspace](./workspace-setup) to create an organization and workspace from scratch.
- An existing AWS cloud account with access to the AWS Batch service.
-- Existing access credentials with permissions to create and manage resources in your AWS account. See [IAM](../compute-envs/aws-batch#required-platform-iam-permissions) for guidance to set up IAM permissions for Platform.
+- Existing access credentials with permissions to create and manage resources in your AWS account. See [AWS IAM policies](/platform-cloud/integrations/cloud-providers/aws/iam-policies) for guidance to set up IAM permissions for Platform.
:::
## Compute environment
diff --git a/platform-cloud/docs/getting-started/studios.md b/platform-cloud/docs/getting-started/studios.md
index 3dc8d196a..1b786735c 100644
--- a/platform-cloud/docs/getting-started/studios.md
+++ b/platform-cloud/docs/getting-started/studios.md
@@ -14,7 +14,7 @@ You will need the following to get started:
- At least the **Maintain** workspace [user role](../orgs-and-teams/roles) to create and configure Studios.
- An [AWS Batch compute environment](../compute-envs/aws-batch#create-a-seqera-aws-batch-compute-environment) (**without Fargate**) with sufficient resources (minimum: 2 CPUs, 8192 MB RAM).
-- Valid [credentials](../credentials/overview) for your cloud storage account and compute environment.
+- Valid [credentials](/platform-cloud/integrations/overview) for your cloud storage account and compute environment.
- [Data Explorer](../data/data-explorer) enabled in your workspace.
:::
diff --git a/platform-cloud/docs/git/overview.md b/platform-cloud/docs/git/overview.md
deleted file mode 100644
index 368ce5c92..000000000
--- a/platform-cloud/docs/git/overview.md
+++ /dev/null
@@ -1,202 +0,0 @@
----
-title: "Git integration"
-description: "Connecting to Git repositories in Seqera Platform and Seqera AI."
-date: "10 Oct 2025"
-tags: [git]
----
-
-Data pipelines are composed of many assets, including pipeline scripts, configuration files, dependency descriptors (such as for Conda or Docker), documentation, etc. When you manage complex data pipelines as Git repositories, all assets can be versioned and deployed with a specific tag, release, or commit ID. Version control and containerization are crucial to enable reproducible pipeline executions, and provide the ability to continuously test and validate pipelines as the code evolves over time.
-
-Seqera products have built-in support for [Git](https://git-scm.com) and several Git-hosting platforms. This page covers Git integration for both **Seqera Platform** and [**Seqera AI**](#seqera-ai).
-
-## Seqera Platform
-
-Seqera Platform enables launching pipelines directly from Git repositories. Pipelines can be pulled remotely from both public and private Git providers, including the most popular platforms: GitHub, GitLab, and BitBucket.
-
-### Public repositories
-
-Launch a public Nextflow pipeline by entering its Git repository URL in the Pipeline to launch field.
-
-When you specify the Revision number, the list of available revisions are automatically pulled using the Git provider's API. By default, the default branch (usually `main` or `master`) will be used.
-
-:::tip
-[nf-core](https://nf-co.re/pipelines) is a great resource for public Nextflow pipelines.
-:::
-
-:::info
-The GitHub API imposes [rate limits](https://docs.github.com/en/developers/apps/building-github-apps/rate-limits-for-github-apps) on API requests. You can increase your rate limit by adding [GitHub credentials](#github) to your workspace as shown below.
-:::
-
-### Private repositories
-
-To access private Nextflow pipelines, add the credentials for your private Git hosting provider to Seqera.
-
-:::info
-Credentials are encrypted with the AES-256 cypher before secure storage and are never exposed in an unencrypted way by any Seqera API.
-:::
-
-### Multiple credential filtering
-
-When you have multiple stored credentials, Seqera selects the most relevant credential for your repository in the following order:
-
-1. Seqera evaluates all the stored credentials available to the current workspace.
-2. Credentials are filtered by Git provider (GitHub, GitLab, Bitbucket, etc.)
-3. Seqera selects the credential with a Repository base URL most similar to the target repository.
-4. If no Repository base URL values are specified in the workspace credentials, the most long-lived credential is selected.
-
-#### Credential filtering example
-
-Workspace A contains four credentials:
-
-**Credential A**
-- Type: GitHub
-- Repository base URL:
-
-**Credential B**
-- Type: GitHub
-- Repository base URL: `https://github.com/`
-
-**Credential C**
-- Type: GitHub
-- Repository base URL: `https://github.com/pipeline-repo`
-
-**Credential D**
-- Type: GitLab
-- Repository base URL: `https://gitlab.com/repo-a`
-
-If you launch a pipeline with a Nextflow workflow in the `https://github.com/pipeline-repo`, Seqera will use Credential C.
-
-For the application to select the most appropriate credential for your repository, we recommend that you:
-- Specify the Repository base URL values as completely as possible for each Git credential used in the workspace.
-- Favor the use of service account type credentials where possible (such as GitLab group access tokens).
-- Avoid storing multiple user-based tokens with similar permissions.
-
-## Seqera AI
-
-[Seqera AI](https://seqera.io/ask-ai/chat-v2) integrates with your pipeline GitHub repositories to provide intelligent assistance with pipeline development and modification. To fully utilize the power of Seqera AI, it needs access to your pipeline codebase to analyze, suggest changes, and even create pull requests on your behalf.
-
-### Set up GitHub access
-
-To enable Seqera AI to interact with your pipeline GitHub repositories:
-
-1. **Generate a personal access token**
- - Navigate to [GitHub Personal Access Tokens](https://github.com/settings/personal-access-tokens)
- - Create a new token with the following permissions:
- - **Pull Requests**: Read & Write
- - **Contents**: Read & Write
- - Your token value will be displayed only once. Copy it before navigating away from the tokens page.
-
-2. **Add the token to Seqera AI**
- - Open [Seqera AI](https://seqera.io/ask-ai/chat-v2).
- - In the bottom-left user menu, select **Add token**.
- - Enter your personal access token in the field provided, then select **Set token**.
-
-### Capabilities
-
-With proper GitHub access configured, Seqera AI can:
-- Access and analyze your pipeline codebase
-- Create feature branches for proposed changes
-- Generate pull requests for your review
-- Suggest improvements based on your existing code patterns
-
-:::tip
-Seqera AI respects your repository's branch protection rules and will create pull requests for review rather than directly modifying protected branches.
-:::
-
-## Seqera Platform Git provider credentials
-
-The following sections detail how to configure credentials for specific Git providers in Seqera. These credentials enable access to private repositories for pipeline execution.
-
-### Azure DevOps repositories
-
-You can authenticate to Azure DevOps repositories using a [personal access token (PAT)](https://learn.microsoft.com/en-us/azure/devops/organizations/accounts/use-personal-access-tokens-to-authenticate?view=azure-devops&tabs=Windows#about-pats).
-
-Once you have created and copied your access token, create a new credential in Seqera using these steps:
-
-#### Create Azure DevOps credentials
-
-1. From an organization workspace: Select **Credentials** > **Add Credentials**. From your personal workspace: Go to the user menu and select **Your credentials** > **Add credentials**.
-2. Enter a **Name** for the new credentials.
-3. Select **Azure DevOps** as the **Provider**.
-4. Enter your **Username** and **Access token**.
-5. (Recommended) Enter the **Repository base URL** for which the credentials should be applied. This option is used to apply the provided credentials to a specific repository, e.g., `https://dev.azure.com//`.
-
-### GitHub
-
-Use an access token to connect Seqera Platform to a private [GitHub](https://github.com/) repository. Personal (classic) or fine-grained access tokens can be used.
-
-:::info
-A user's personal access token (classic) can access every repository that the user has access to. GitHub recommends using fine-grained personal access tokens (currently in beta) instead, which you can restrict to specific repositories. Fine-grained personal access tokens also enable you to specify granular permissions instead of broad scopes.
-:::
-
-For personal (classic) tokens, you must grant access to the private repository by selecting the main `repo` scope when the token is created. See [Creating a personal access token (classic)](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token#creating-a-personal-access-token-classic) for instructions to create your personal access token (classic).
-
-For fine-grained tokens, the repository's organization must [opt in](https://docs.github.com/en/organizations/managing-programmatic-access-to-your-organization/setting-a-personal-access-token-policy-for-your-organization) to the use of fine-grained tokens. Tokens can be restricted by resource owner (organization), repository access, and permissions. See [Creating a fine-grained personal access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-fine-grained-personal-access-token) for instructions to create your fine-grained access token.
-
-After you've created and copied your access token, create a new credential in Seqera:
-
-#### Create GitHub credentials
-
-1. From an organization workspace: Select **Credentials** > **Add Credentials**. From your personal workspace: Go to the user menu and select **Your credentials** > **Add credentials**.
-1. Enter a **Name** for the new credentials.
-1. Select **GitHub** as the **Provider**.
-1. Enter your **Username** and **Access token**.
-1. (Recommended) Enter the **Repository base URL** for which the credentials should be applied. This option is used to apply the provided credentials to a specific repository, e.g., `https://github.com/seqeralabs`.
-
-### GitLab
-
-GitLab supports [Personal](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html), [Group](https://docs.gitlab.com/ee/user/group/settings/group_access_tokens.html#group-access-tokens), and [Project](https://docs.gitlab.com/ee/user/project/settings/project_access_tokens.html) access tokens for authentication. Your access token must have the `api`, `read_api`, and `read_repository` scopes to work with Seqera. For all three token types, use the token value in both the **Password** and **Access token** fields in the Seqera credential creation form.
-
-After you have created and copied your access token, create a new credential in Seqera with these steps:
-
-#### Create GitLab credentials
-
-1. From an organization workspace: Select **Credentials** > **Add Credentials**. From your personal workspace: Go to the user menu and select **Your credentials** > **Add credentials**.
-1. Enter a **Name** for the new credentials.
-1. Select **GitLab** as the **Provider**.
-1. Enter your **Username**. For Group and Project access tokens, the username can be any non-empty value.
-1. Enter your token value in both the **Password** and **Access token** fields.
-1. Enter the **Repository base URL** (recommended). This option is used to apply the credentials to a specific repository, e.g. `https://gitlab.com/seqeralabs`.
-
-### Gitea
-
-To connect to a private [Gitea](https://gitea.io/) repository, use your Gitea user credentials to create a new credential in Seqera with these steps:
-
-#### Create Gitea credentials
-
-1. From an organization workspace, go to the **Credentials** tab and select **Add Credentials**. From your personal workspace, select **Your credentials** from the user menu, then select **Add credentials**.
-1. Enter a **Name** for the new credentials.
-1. Select **Gitea** as the **Provider**.
-1. Enter your **Username**.
-1. Enter your **Password**.
-1. Enter your **Repository base URL** (required).
-
-### Bitbucket
-
-To connect to a private BitBucket repository, see [API tokens](https://support.atlassian.com/bitbucket-cloud/docs/api-tokens/) to learn how to create a BitBucket API token (the API token must have at least `read:repository:bitbucket` scope). Then, create a new credential in Seqera with these steps:
-
-:::warning
-API tokens are tied to users. This differs from access tokens, which are tied to a specific resource. While Seqera supports API tokens, access tokens are not supported for accessing BitBucket repositories.
-
-API tokens replace [app passwords](https://support.atlassian.com/bitbucket-cloud/docs/app-passwords/), which can no longer be created after September 9, 2025 and will be phased out June 9, 2026. While app passwords are still supported, they are not recommended. See [Bitbucket Cloud transitions to API tokens](https://www.atlassian.com/blog/bitbucket/bitbucket-cloud-transitions-to-api-tokens-enhancing-security-with-app-password-deprecation) for more information.
-:::
-
-#### Create BitBucket credentials
-
-1. From an organization workspace: Select **Credentials** > **Add Credentials**. From your personal workspace: Go to the user menu and select **Your credentials** > **Add credentials**.
-1. Enter a **Name** for the new credentials.
-1. Select **BitBucket** as the **Provider**.
-1. Enter your **Username** (account email) and **Token**.
-1. Enter the **Repository base URL** (recommended). This option can be used to apply the credentials to a specific repository, e.g., `https://bitbucket.org/seqeralabs`.
-
-### AWS CodeCommit
-
-To connect to a private AWS CodeCommit repository, see the [AWS documentation](https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-identity-based-access-control.html) to learn more about IAM permissions for CodeCommit. Then, use your IAM account access key and secret key to create a credential in Seqera with these steps:
-
-#### Create AWS CodeCommit credentials
-
-1. From an organization workspace: Select **Credentials** > **Add Credentials**. From your personal workspace: Go to the user menu and select **Your credentials** > **Add credentials**.
-1. Enter a **Name** for the new credentials.
-1. Select **CodeCommit** as the **Provider**.
-1. Enter the **Access key** and **Secret key** of the AWS IAM account that will be used to access the target CodeCommit repository.
-1. Enter the **Repository base URL** for which the credentials should be applied (recommended). This option can be used to apply the credentials to a specific region, e.g., `https://git-codecommit.eu-west-1.amazonaws.com`.
diff --git a/platform-cloud/docs/credentials/agent_credentials.md b/platform-cloud/docs/integrations/agent/credentials.md
similarity index 85%
rename from platform-cloud/docs/credentials/agent_credentials.md
rename to platform-cloud/docs/integrations/agent/credentials.md
index 1c0aa9c6c..b1dba8052 100644
--- a/platform-cloud/docs/credentials/agent_credentials.md
+++ b/platform-cloud/docs/integrations/agent/credentials.md
@@ -6,7 +6,7 @@ last updated: "2025-10-21"
tags: [agent, credentials]
---
-[Tower Agent](../supported_software/agent/overview) enables Seqera Platform to launch pipelines on HPC clusters that do not allow direct access through an SSH client. Tower Agent authenticates a secure connection with Seqera using a Tower Agent credential.
+[Tower Agent](/platform-cloud/supported_software/agent/overview) enables Seqera Platform to launch pipelines on HPC clusters that do not allow direct access through an SSH client. Tower Agent authenticates a secure connection with Seqera using a Tower Agent credential.
## Tower Agent sharing
diff --git a/platform-cloud/docs/integrations/cloud-providers/aws/credentials.md b/platform-cloud/docs/integrations/cloud-providers/aws/credentials.md
new file mode 100644
index 000000000..89d81c980
--- /dev/null
+++ b/platform-cloud/docs/integrations/cloud-providers/aws/credentials.md
@@ -0,0 +1,152 @@
+---
+title: "AWS credentials"
+description: "Create the IAM policy, IAM user, and IAM role for Seqera Platform on AWS, and add credentials in Seqera."
+tags: [aws, iam, credentials, integration]
+---
+
+This page covers how to create the IAM policy, IAM user, and IAM role in AWS, then add the resulting credentials to Seqera Platform. The same procedure applies to AWS Batch, AWS Cloud, and Amazon EKS compute environments — only the underlying [IAM policy](./iam-policies) differs.
+
+## Create the IAM policy
+
+The policy must be created in the AWS account where the Seqera-managed AWS resources will live.
+
+1. Open the [AWS IAM console](https://console.aws.amazon.com/iam) in the target AWS account.
+1. From the left navigation menu, select **Policies** under **Access management**.
+1. Select **Create policy**.
+1. On the **Policy editor** section, select the **JSON** tab.
+1. Replace the default text with a policy adapted to your use case (see [IAM policies](./iam-policies)), then select **Next**.
+1. Enter a name and description on the **Review and create** page, then select **Create policy**.
+
+## IAM user creation
+
+Seqera requires an IAM user to manage AWS resources in your account. We recommend creating a separate IAM policy rather than an IAM user inline policy — inline policies allow only 2,048 characters, which may not be sufficient for the required permissions.
+
+When multiple users need to access the same AWS account, create an [IAM role](#iam-role-creation-optional) with the required permissions instead and let the IAM user assume that role.
+
+### Create an IAM user
+
+1. From the [AWS IAM console](https://console.aws.amazon.com/iam), select **Users** in the left navigation menu, then select **Create User** at the top right.
+1. Enter a name (e.g., `seqera`) and select **Next**.
+1. Under **Permission options**, select **Attach policies directly**, then search for and select the policy you created above. Select **Next**.
+ - To make the IAM user assume a role to manage AWS resources (see [IAM role creation (optional)](#iam-role-creation-optional)), instead create a policy with the following content (edit the `Resource` with the role's ARN) and attach it to the IAM user:
+
+ ```json
+ {
+ "Sid": "AssumeRoleToManageBatchResources",
+ "Effect": "Allow",
+ "Action": "sts:AssumeRole",
+ "Resource": "arn:aws:iam:::role/",
+ "Condition": {
+ "StringEquals": {
+ "sts:ExternalId": ""
+ }
+ }
+ }
+ ```
+1. On the last page, review the user details and select **Create user**.
+
+For more details, see the [AWS documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html).
+
+### Obtain IAM user credentials
+
+1. From the [AWS IAM console](https://console.aws.amazon.com/iam), select **Users** in the left navigation menu, then select the newly created user.
+1. Select the **Security credentials** tab, then select **Create access key** under the **Access keys** section.
+1. In the **Use case** dialog, select **Command line interface (CLI)**, tick the confirmation checkbox, and select **Next**.
+1. Optionally provide a description, then select **Create access key**.
+1. Save the **Access key** and **Secret access key** in a secure location — you will provide them when creating credentials in Seqera.
+
+## IAM role creation (optional)
+
+Rather than attaching permissions directly to the IAM user, create an IAM role with the required permissions and let the IAM user assume that role. This is useful when multiple IAM users access the same AWS account: actual permissions to operate on resources are granted only to a single centralized role.
+
+1. From the [AWS IAM console](https://console.aws.amazon.com/iam), select **Roles** in the left navigation menu, then select **Create role**.
+1. Select **Custom trust policy** as the type of trusted entity. Provide the following policy and edit the AWS principal with the IAM user ARN:
+
+ ```json
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": [
+ "arn:aws:iam:::user/"
+ ]
+ },
+ "Action": "sts:AssumeRole",
+ "Condition": {
+ "StringEquals": {
+ "sts:ExternalId": ""
+ }
+ }
+ },
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": [
+ "arn:aws:iam:::user/"
+ ]
+ },
+ "Action": "sts:TagSession"
+ }
+ ]
+ }
+ ```
+
+1. On the **Permissions** page, search for and select the policy created in [IAM user creation](#iam-user-creation). Select **Next**.
+1. Give the role a name and optionally a description and tags, then select **Create role**.
+
+Multiple users can be specified in the trust policy by adding more ARNs to the `Principal` section.
+
+:::note
+Seqera Platform generates the `External ID` value during AWS credential creation. For role-based credentials, use this exact value in your IAM trust policy (`sts:ExternalId`).
+:::
+
+### Role-based trust policy example (Seqera Cloud)
+
+For role-based AWS credentials in Seqera Cloud, allow the Seqera Cloud access role `arn:aws:iam::161471496260:role/SeqeraPlatformCloudAccessRole` in your trust policy and enforce the External ID generated during credential creation:
+
+```json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "arn:aws:iam::161471496260:role/SeqeraPlatformCloudAccessRole"
+ },
+ "Action": "sts:AssumeRole",
+ "Condition": {
+ "StringEquals": {
+ "sts:ExternalId": ""
+ }
+ }
+ },
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "arn:aws:iam::161471496260:role/SeqeraPlatformCloudAccessRole"
+ },
+ "Action": "sts:TagSession"
+ }
+ ]
+}
+```
+
+## AWS credential options
+
+AWS credentials can be configured in two ways:
+
+- **Key-based credentials**: Access key and secret key with direct IAM permissions. If you provide a role ARN in **Assume role**, the **Generate External ID** switch is displayed and External ID generation is optional.
+- **Role-based credentials (recommended)**: Use role assumption only (no static keys). Paste the IAM role ARN in **Assume role**. External ID is generated automatically when you save.
+
+The **Assume role** field is available for both modes. It is optional for key-based credentials and required for role-based credentials.
+
+Existing credentials created before March 2026 continue to work without changes.
+
+## Next steps
+
+- [Set up S3, EFS, or FSx work directories](./data-access).
+- For Amazon EKS, complete [Kubernetes RBAC and Service Account setup](./eks-additions).
+- For AWS Batch with manually-managed resources, see [AWS Batch manual setup](./manual-setup).
+- Add the credentials to Seqera Platform: see [the AWS Batch compute environment guide](/platform-cloud/compute-envs/aws-batch) (or [AWS Cloud](/platform-cloud/compute-envs/aws-cloud) / [EKS](/platform-cloud/compute-envs/eks)) for the in-product steps.
diff --git a/platform-cloud/docs/integrations/cloud-providers/aws/data-access.md b/platform-cloud/docs/integrations/cloud-providers/aws/data-access.md
new file mode 100644
index 000000000..eb36fe524
--- /dev/null
+++ b/platform-cloud/docs/integrations/cloud-providers/aws/data-access.md
@@ -0,0 +1,61 @@
+---
+title: "AWS data access"
+description: "S3 bucket, EFS, and FSx prerequisites for Seqera Platform on AWS."
+tags: [aws, s3, efs, fsx, integration]
+---
+
+Set up an S3 bucket, EFS, or FSx file system to use as the Nextflow work directory and to store input and output data. The IAM permissions to access these resources are documented in [AWS IAM policies](./iam-policies#s3-access-optional).
+
+## S3 bucket creation
+
+AWS S3 (Simple Storage Service) is a type of **object storage**. Use one or more **S3 buckets** to access input and output files with [Studios](/platform-cloud/studios/overview) and [Data Explorer](/platform-cloud/data/data-explorer). An S3 bucket can also store intermediate Nextflow files, as an alternative to EFS or FSx.
+
+:::note
+EFS and FSx work directories are incompatible with Studios.
+:::
+
+1. Navigate to the [AWS S3 console](https://console.aws.amazon.com/s3/home).
+1. In the top right, select the same region where you plan to create your AWS Batch compute environment.
+1. Select **Create bucket**.
+1. Enter a unique name for your bucket.
+1. Leave the rest of the options as default and select **Create bucket**.
+
+:::note
+Nextflow uses S3 to store intermediate files. In production pipelines, this can amount to a lot of data. Consider a retention policy to automatically delete intermediate files after 30 days. See the [AWS documentation](https://aws.amazon.com/premiumsupport/knowledge-center/s3-empty-bucket-lifecycle-rule/) for more information.
+:::
+
+## EFS or FSx file system creation
+
+[AWS Elastic File System (EFS)](https://aws.amazon.com/efs/) and [AWS FSx for Lustre](https://aws.amazon.com/fsx/lustre/) are types of **file storage** that can be used as a Nextflow work directory, as an alternative to S3 buckets.
+
+:::note
+EFS and FSx work directories are incompatible with Studios.
+:::
+
+To use EFS or FSx as your Nextflow work directory, create the file system in the same region as your AWS Batch compute environment.
+
+You can let Seqera create EFS or FSx automatically when creating the AWS Batch compute environment, or create them manually. If Seqera creates the file system, it is also deleted when the compute environment is removed from Platform, unless **Dispose Resources** is disabled in the advanced options.
+
+### Creating an EFS file system
+
+To create an EFS file system manually, visit the [EFS console](https://console.aws.amazon.com/efs/home).
+
+1. Select **Create file system**.
+1. Optionally give it a name, then select the VPC where your AWS Batch compute environment will be created.
+1. Leave the rest of the options as default and select **Create file system**.
+
+### Creating an FSx file system
+
+To create an FSx for Lustre file system manually, visit the [FSx console](https://console.aws.amazon.com/fsx/home).
+
+1. Select **Create file system**.
+1. Select **FSx for Lustre**.
+1. Follow the prompts to configure the file system, then select **Next**.
+1. Review the configuration and select **Create file system**.
+
+Make sure the [Lustre client](https://docs.aws.amazon.com/fsx/latest/LustreGuide/install-lustre-client.html) is available in the AMIs used by your AWS Batch compute environment to mount FSx file systems.
+
+## Next steps
+
+- [Configure required IAM permissions for S3, EFS, and FSx](./iam-policies#s3-access-optional).
+- Create your [AWS Batch compute environment](/platform-cloud/compute-envs/aws-batch) and reference the bucket or file system as the work directory.
diff --git a/platform-cloud/docs/integrations/cloud-providers/aws/eks-additions.md b/platform-cloud/docs/integrations/cloud-providers/aws/eks-additions.md
new file mode 100644
index 000000000..dc184ddda
--- /dev/null
+++ b/platform-cloud/docs/integrations/cloud-providers/aws/eks-additions.md
@@ -0,0 +1,127 @@
+---
+title: "EKS additions"
+description: "Service Account IAM role for Fusion v2 and EKS aws-auth configuration."
+tags: [aws, eks, kubernetes, irsa, integration]
+---
+
+Amazon EKS requires two pieces of setup beyond the [base AWS IAM policies](./iam-policies) and [credentials](./credentials):
+
+1. A Kubernetes Service Account IAM role that lets pods access S3 (required for Fusion v2).
+2. An EKS authentication entry that lets the Seqera IAM user (or role) reach the cluster.
+
+## Configure EKS Service Account IAM role for Fusion v2
+
+To use [Fusion v2](https://docs.seqera.io/fusion) in your Amazon EKS compute environment, an AWS S3 bucket must be the work directory and both the head and compute Service Accounts (if separate) must have access to it.
+
+If you do not plan to use Fusion (legacy storage), skip this section.
+
+1. Create an IAM role with the following permissions:
+
+ ```json
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "s3:ListBucket"
+ ],
+ "Resource": [
+ "arn:aws:s3:::"
+ ]
+ },
+ {
+ "Action": [
+ "s3:GetObject",
+ "s3:PutObject",
+ "s3:PutObjectTagging",
+ "s3:DeleteObject"
+ ],
+ "Resource": [
+ "arn:aws:s3:::/*"
+ ],
+ "Effect": "Allow"
+ }
+ ]
+ }
+ ```
+
+ Replace `<bucket-name>` with the bucket used as work directory.
+
+1. Create a trust relationship with the Kubernetes Service Account (or accounts) that Seqera uses to manage the EKS cluster — `tower-launcher-sa` in the default configuration:
+
+ ```json
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Federated": "arn:aws:iam:::oidc-provider/oidc.eks..amazonaws.com/id/"
+ },
+ "Action": "sts:AssumeRoleWithWebIdentity",
+ "Condition": {
+ "StringEquals": {
+ "oidc.eks..amazonaws.com/id/:aud": "sts.amazonaws.com",
+ "oidc.eks..amazonaws.com/id/:sub": "system:serviceaccount::"
+ }
+ }
+ }
+ ]
+ }
+ ```
+
+ Replace `<account-id>`, `<region>`, `<oidc-id>`, `<namespace>`, and `<service-account-name>`.
+
+1. Annotate the Kubernetes Service Account with the IAM role:
+
+ ```shell
+ kubectl annotate serviceaccount <service-account-name> --namespace <namespace> eks.amazonaws.com/role-arn=arn:aws:iam::<account-id>:role/<role-name>
+ ```
+
+ Replace `<service-account-name>` (default `tower-launcher-sa` from the [cluster preparation guide](/platform-cloud/compute-envs/k8s)), `<namespace>`, `<account-id>`, and `<role-name>`.
+
+Pods using the service account can now assume the IAM role and access the work directory bucket. See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html) for further details.
+
+## Allow an IAM User or Role access to EKS
+
+Configure the EKS cluster to let the IAM user (or role) access the cluster and manage pods.
+
+1. Retrieve from the [AWS IAM console](https://console.aws.amazon.com/iam) the ARN of the [IAM User](./credentials#iam-user-creation) or [IAM Role](./credentials#iam-role-creation-optional) you created.
+
+ :::note
+ The AWS credentials for the IAM user are used in the Seqera compute environment configuration.
+ :::
+
+1. Modify the EKS `aws-auth` ConfigMap to allow the IAM User to access the cluster and manage pods. This step may require cluster administrator privileges:
+
+ ```bash
+ kubectl edit configmap -n kube-system aws-auth
+ ```
+
+ 1. In the editor, add the following entry to `mapUsers`, replacing `<user-arn>` with the user ARN:
+
+ ```yaml
+ mapUsers: |
+ - userarn: <user-arn>
+ username: tower-launcher-user
+ groups:
+ - tower-launcher-role
+ ```
+
+ To allow an IAM role to authenticate to the cluster instead, add the role to `mapRoles`. The role ARN must be specified in the **Assume role** field on the Seqera compute environment, the role must have a trust relationship with the Seqera IAM user, and the entry takes the form:
+
+ ```yaml
+ mapRoles: |
+ - rolearn: <role-arn>
+ username: tower-launcher-role
+ groups:
+ - tower-launcher-role
+ ```
+
+ See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html) for more details.
+
+## Next steps
+
+- Configure the [base AWS IAM policy](./iam-policies) and [credentials](./credentials).
+- Create the [Amazon EKS compute environment](/platform-cloud/compute-envs/eks).
diff --git a/platform-cloud/docs/integrations/cloud-providers/aws/iam-policies.md b/platform-cloud/docs/integrations/cloud-providers/aws/iam-policies.md
new file mode 100644
index 000000000..5854bae3a
--- /dev/null
+++ b/platform-cloud/docs/integrations/cloud-providers/aws/iam-policies.md
@@ -0,0 +1,829 @@
+---
+title: "AWS IAM policies"
+description: "Required and optional IAM policies for Seqera Platform on AWS."
+tags: [aws, iam, permissions, integration]
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+This page is the canonical reference for the AWS IAM permissions Seqera Platform needs. Pick the tab that matches your compute environment type, copy the policy, and follow [AWS credentials](./credentials) to attach it to an IAM user or role.
+
+:::tip Permission required for...
+- **AWS Batch / Batch Forge** — select the **AWS Batch** tab below.
+- **AWS Cloud (EC2-based CE)** — select the **AWS Cloud** tab.
+- **Amazon EKS** — select the **Amazon EKS** tab.
+- **S3 access for Studios / Data Explorer** — see [S3 access](#s3-access-optional).
+- **AWS Secrets Manager (pipeline secrets)** — see [Pipeline secrets](#pipeline-secrets-optional) inside the AWS Batch tab.
+- **EFS / FSx work directories** — see the AWS Batch tab.
+:::
+
+To create and launch pipelines, explore buckets with Data Explorer, or run Studio sessions, an IAM user (or assumed role) must hold the permissions documented here. Some permissions are mandatory; others are optional and only enable convenience features such as dropdown auto-population.
+
+We recommend the principle of least privilege: start from the permissive policy in your tab, then trim to your use case using the per-section breakdowns.
+
+
+
+
+
+### Full permissive policy (AWS Batch)
+
+
+Full permissive policy (for reference)
+
+```json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "BatchEnvironmentManagementCanBeRestricted",
+ "Effect": "Allow",
+ "Action": [
+ "batch:CreateComputeEnvironment",
+ "batch:CreateJobQueue",
+ "batch:DeleteComputeEnvironment",
+ "batch:DeleteJobQueue",
+ "batch:UpdateComputeEnvironment",
+ "batch:UpdateJobQueue"
+ ],
+ "Resource": [
+ "arn:aws:batch:*:*:compute-environment/TowerForge-*",
+ "arn:aws:batch:*:*:job-queue/TowerForge-*"
+ ]
+ },
+ {
+ "Sid": "BatchEnvironmentListing",
+ "Effect": "Allow",
+ "Action": [
+ "batch:DescribeComputeEnvironments",
+ "batch:DescribeJobDefinitions",
+ "batch:DescribeJobQueues",
+ "batch:DescribeJobs"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "BatchJobsManagementCanBeRestricted",
+ "Effect": "Allow",
+ "Action": [
+ "batch:CancelJob",
+ "batch:RegisterJobDefinition",
+ "batch:SubmitJob",
+ "batch:TagResource",
+ "batch:TerminateJob"
+ ],
+ "Resource": [
+ "arn:aws:batch:*:*:job-definition/*",
+ "arn:aws:batch:*:*:job-queue/TowerForge-*",
+ "arn:aws:batch:*:*:job/*"
+ ]
+ },
+ {
+ "Sid": "LaunchTemplateManagement",
+ "Effect": "Allow",
+ "Action": [
+ "ec2:CreateLaunchTemplate",
+ "ec2:DeleteLaunchTemplate",
+ "ec2:DescribeLaunchTemplates",
+ "ec2:DescribeLaunchTemplateVersions"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "PassRolesToBatchCanBeRestricted",
+ "Effect": "Allow",
+ "Action": "iam:PassRole",
+ "Resource": "*",
+ "Condition": {
+ "StringEquals": {
+ "iam:PassedToService": [
+ "batch.amazonaws.com",
+ "ec2.amazonaws.com"
+ ]
+ }
+ }
+ },
+ {
+ "Sid": "CloudWatchLogsAccessCanBeRestricted",
+ "Effect": "Allow",
+ "Action": [
+ "logs:Describe*",
+ "logs:FilterLogEvents",
+ "logs:Get*",
+ "logs:List*",
+ "logs:StartQuery",
+ "logs:StopQuery",
+ "logs:TestMetricFilter"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "OptionalS3PlatformDataAccessCanBeRestricted",
+ "Effect": "Allow",
+ "Action": [
+ "s3:Get*",
+ "s3:List*",
+ "s3:PutObject"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "OptionalIAMManagementCanBeRestricted",
+ "Effect": "Allow",
+ "Action": [
+ "iam:AddRoleToInstanceProfile",
+ "iam:AttachRolePolicy",
+ "iam:CreateInstanceProfile",
+ "iam:CreateRole",
+ "iam:DeleteInstanceProfile",
+ "iam:DeleteRole",
+ "iam:DeleteRolePolicy",
+ "iam:DetachRolePolicy",
+ "iam:GetRole",
+ "iam:ListAttachedRolePolicies",
+ "iam:ListRolePolicies",
+ "iam:PutRolePolicy",
+ "iam:RemoveRoleFromInstanceProfile",
+ "iam:TagInstanceProfile",
+ "iam:TagRole"
+ ],
+ "Resource": [
+ "arn:aws:iam::*:role/TowerForge-*",
+ "arn:aws:iam::*:instance-profile/TowerForge-*"
+ ]
+ },
+ {
+ "Sid": "OptionalFetchOptimizedAMIMetadata",
+ "Effect": "Allow",
+ "Action": "ssm:GetParameters",
+ "Resource": "arn:aws:ssm:*:*:parameter/aws/service/ecs/*"
+ },
+ {
+ "Sid": "OptionalEC2MetadataDescribe",
+ "Effect": "Allow",
+ "Action": [
+ "ec2:DescribeAccountAttributes",
+ "ec2:DescribeImages",
+ "ec2:DescribeInstanceTypeOfferings",
+ "ec2:DescribeInstanceTypes",
+ "ec2:DescribeKeyPairs",
+ "ec2:DescribeSecurityGroups",
+ "ec2:DescribeSubnets",
+ "ec2:DescribeVpcs"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "OptionalFSXManagementCanBeRestricted",
+ "Effect": "Allow",
+ "Action": [
+ "fsx:CreateFileSystem",
+ "fsx:DeleteFileSystem",
+ "fsx:DescribeFileSystems",
+ "fsx:TagResource"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "OptionalEFSManagementCanBeRestricted",
+ "Effect": "Allow",
+ "Action": [
+ "elasticfilesystem:CreateFileSystem",
+ "elasticfilesystem:DeleteFileSystem",
+ "elasticfilesystem:CreateMountTarget",
+ "elasticfilesystem:DeleteMountTarget",
+ "elasticfilesystem:DescribeFileSystems",
+ "elasticfilesystem:DescribeMountTargets",
+ "elasticfilesystem:UpdateFileSystem",
+ "elasticfilesystem:PutLifecycleConfiguration",
+ "elasticfilesystem:TagResource"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "OptionalPipelineSecretsListing",
+ "Effect": "Allow",
+ "Action": "secretsmanager:ListSecrets",
+ "Resource": "*"
+ },
+ {
+ "Sid": "OptionalPipelineSecretsManagementCanBeRestricted",
+ "Effect": "Allow",
+ "Action": [
+ "secretsmanager:DescribeSecret",
+ "secretsmanager:DeleteSecret",
+ "secretsmanager:CreateSecret"
+ ],
+ "Resource": "arn:aws:secretsmanager:*:*:secret:tower-*"
+ }
+ ]
+}
+```
+
+
+
+### AWS Batch management
+
+The first section of the policy allows Seqera to create, update, and delete Batch compute environments ("CE"), job queues ("JQ"), and jobs.
+
+If you use manually created CEs and JQs or prefer to manage their lifecycle yourself, remove the permissions to manipulate CEs and JQs. The minimum permissions required are:
+
+- `batch:DescribeJobs` to report job status
+- `batch:DescribeJobDefinitions` to list existing job definitions
+- `batch:RegisterJobDefinition` to create new job definitions
+- `batch:CancelJob` to cancel jobs
+- `batch:SubmitJob` to submit jobs
+- `batch:TagResource` to tag jobs
+- `batch:TerminateJob` to terminate jobs
+
+`batch:DescribeJobQueues` populates the existing job queues in a drop-down menu but is not required if you specify manually created job queues. It is required when you let Seqera create and manage job queues with Forge — in that case, also add `batch:DescribeComputeEnvironments`.
+
+You can also restrict permissions based on resource tags. Tags are defined by users when they [set up a pipeline in Platform](/platform-cloud/resource-labels/overview).
+
+```json
+{
+ "Sid": "BatchEnvironmentListing",
+ "Effect": "Allow",
+ "Action": [
+ "batch:DescribeJobDefinitions",
+ "batch:DescribeJobs"
+ ],
+ "Resource": "*"
+},
+{
+ "Sid": "BatchJobsManagement",
+ "Effect": "Allow",
+ "Action": [
+ "batch:CancelJob",
+ "batch:RegisterJobDefinition",
+ "batch:SubmitJob",
+ "batch:TagResource",
+ "batch:TerminateJob"
+ ],
+ "Resource": [
+ "arn:aws:batch:::job-queue/MyCustomJQ",
+ "arn:aws:batch:::job-definition/*",
+ "arn:aws:batch:::job/*"
+ ],
+ "Condition": {
+ "StringEqualsIfExists": {
+ "aws:ResourceTag/MyCustomTag": "MyCustomValue"
+ }
+ }
+}
+```
+
+:::warning
+Restricting `batch` actions with resource tags requires you to set the appropriate tags on each Seqera pipeline when configuring it in Platform. Forgetting to set the tag causes the pipeline to fail to run.
+:::
+
+Job definition and job name resources cannot be restricted to specific names because Seqera creates these with dynamic names. Use the wildcard `*` in their resource names. `batch:SubmitJob` requires permission on both job definitions and job queues — include both ARNs in the `Resource` array.
+
+:::note
+The quick start policy expects CE and JQ names automatically created by Seqera to start with the `TowerForge-` prefix. This is the default for Platform Cloud and cannot be customized.
+:::
+
+### Launch template management
+
+Seqera creates and manages EC2 launch templates using optimized AMIs identified via AWS Systems Manager (SSM).
+
+:::note
+AWS does not support restricting IAM permissions on EC2 launch templates by resource name or tag, so permission must be granted on `*`.
+:::
+
+### Pass role to Batch
+
+`iam:PassRole` allows Seqera to pass [execution IAM roles](https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html#create-execution-role) to AWS Batch to run Nextflow pipelines.
+
+You can restrict the permission to manually created roles or to roles created by Seqera with the default `TowerForge-` prefix, in a specific account:
+
+```json
+{
+ "Sid": "PassRolesToBatch",
+ "Effect": "Allow",
+ "Action": "iam:PassRole",
+ "Resource": "arn:aws:iam:::role/TowerForge-*",
+ "Condition": {
+ "StringEquals": {
+ "iam:PassedToService": [
+ "batch.amazonaws.com",
+ "ec2.amazonaws.com"
+ ]
+ }
+ }
+}
+```
+
+### CloudWatch logs access
+
+Seqera reads CloudWatch logs to display run logs in the web interface. Scope this down to a [specific log group](/platform-cloud/compute-envs/aws-batch#advanced-options) on the compute environment in a specific account and region:
+
+```json
+{
+ "Sid": "CloudWatchLogsAccess",
+ "Effect": "Allow",
+ "Action": [
+ "logs:Describe*",
+ "logs:FilterLogEvents",
+ "logs:Get*",
+ "logs:List*",
+ "logs:StartQuery",
+ "logs:StopQuery",
+ "logs:TestMetricFilter"
+ ],
+ "Resource": "arn:aws:logs:::log-group:/aws/batch/job/*"
+}
+```
+
+### IAM roles for AWS Batch (optional)
+
+Seqera can automatically create the IAM roles needed to interact with AWS Batch and other AWS services. To opt out, create the IAM roles manually and provide their ARNs during compute environment creation: see [AWS Batch manual setup](./manual-setup) for details.
+
+To allow Seqera to create roles but restrict it to your account and the default IAM role prefix:
+
+```json
+{
+ "Sid": "IAMRoleAndProfileManagement",
+ "Effect": "Allow",
+ "Action": [
+ "iam:AddRoleToInstanceProfile",
+ "iam:AttachRolePolicy",
+ "iam:CreateInstanceProfile",
+ "iam:CreateRole",
+ "iam:DeleteInstanceProfile",
+ "iam:DeleteRole",
+ "iam:DeleteRolePolicy",
+ "iam:DetachRolePolicy",
+ "iam:GetRole",
+ "iam:ListAttachedRolePolicies",
+ "iam:ListRolePolicies",
+ "iam:PutRolePolicy",
+ "iam:RemoveRoleFromInstanceProfile",
+ "iam:TagInstanceProfile",
+ "iam:TagRole"
+ ],
+ "Resource": [
+ "arn:aws:iam:::role/TowerForge-*"
+ "arn:aws:iam:::instance-profile/TowerForge-*"
+ ]
+}
+```
+
+### AWS Systems Manager (optional)
+
+Seqera Platform can interact with AWS Systems Manager (SSM) to [identify ECS-optimized AMIs](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/retrieve-ecs-optimized_AMI.html) for pipeline execution. This permission is optional — you can supply a [custom AMI ID](/platform-cloud/compute-envs/aws-batch#advanced-options) at compute environment creation instead.
+
+### EC2 describe permissions (optional)
+
+Seqera reads EC2 metadata to populate VPCs, subnets, and security groups in dropdown menus during compute environment creation. Without these permissions, users must enter resource ARNs manually.
+
+:::note
+AWS does not support restricting IAM permissions on EC2 Describe actions by resource name or tag, so permission must be granted on `*`.
+:::
+
+### FSx file systems (optional)
+
+If your pipelines use [AWS FSx for Lustre](https://aws.amazon.com/fsx/), Seqera can manage the file systems. Omit this section if you do not use FSx. Describe actions cannot be restricted to specific resources; management actions can.
+
+```json
+{
+ "Sid": "FSxDescribe",
+ "Effect": "Allow",
+ "Action": [
+ "fsx:DescribeFileSystems"
+ ],
+ "Resource": "*"
+},
+{
+ "Sid": "FSxManagement",
+ "Effect": "Allow",
+ "Action": [
+ "fsx:CreateFileSystem",
+ "fsx:DeleteFileSystem",
+ "fsx:TagResource"
+ ],
+ "Resource": "arn:aws:fsx:::file-system/MyManualFSx"
+}
+```
+
+### EFS file systems (optional)
+
+If your pipelines use [AWS EFS](https://aws.amazon.com/efs/), Seqera can manage the file systems. Omit this section if you do not use EFS.
+
+```json
+{
+ "Sid": "EFSDescribe",
+ "Effect": "Allow",
+ "Action": [
+ "elasticfilesystem:DescribeFileSystems",
+ "elasticfilesystem:DescribeMountTargets"
+ ],
+ "Resource": "*"
+},
+{
+ "Sid": "EFSManagement",
+ "Effect": "Allow",
+ "Action": [
+ "elasticfilesystem:CreateFileSystem",
+ "elasticfilesystem:DeleteFileSystem",
+ "elasticfilesystem:CreateMountTarget",
+ "elasticfilesystem:DeleteMountTarget",
+ "elasticfilesystem:UpdateFileSystem",
+ "elasticfilesystem:PutLifecycleConfiguration",
+ "elasticfilesystem:TagResource"
+ ],
+ "Resource": "arn:aws:elasticfilesystem:::file-system/MyManualEFS"
+}
+```
+
+### Pipeline secrets (optional)
+
+Seqera can synchronize [pipeline secrets](/platform-cloud/secrets/overview) defined in a Platform workspace with AWS Secrets Manager. If you don't use pipeline secrets, omit this section.
+
+`secretsmanager:ListSecrets` cannot be restricted, but management actions can be scoped to a specific account and region. Seqera only creates secrets prefixed with `tower-`.
+
+```json
+{
+ "Sid": "PipelineSecretsListing",
+ "Effect": "Allow",
+ "Action": "secretsmanager:ListSecrets",
+ "Resource": "*"
+},
+{
+ "Sid": "PipelineSecretsManagementCanBeRestricted",
+ "Effect": "Allow",
+ "Action": [
+ "secretsmanager:DescribeSecret",
+ "secretsmanager:DeleteSecret",
+ "secretsmanager:CreateSecret"
+ ],
+ "Resource": "arn:aws:secretsmanager:::secret:tower-*"
+}
+```
+
+To use pipeline secrets, the manually-created IAM roles must follow the steps in the [Secrets documentation](/platform-cloud/secrets/overview#aws-secrets-manager-integration).
+
+
+
+
+
+### Full permissive policy (AWS Cloud)
+
+The AWS Cloud compute environment runs Nextflow pipelines and Studio sessions on a single EC2 instance. Only one IAM role in AWS is required, simplifying permissions compared to AWS Batch.
+
+
+Full permissive policy (for reference)
+
+```json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "AwsCloudCreate",
+ "Effect": "Allow",
+ "Action": [
+ "iam:CreateRole",
+ "iam:AddRoleToInstanceProfile",
+ "iam:CreateInstanceProfile",
+ "iam:AttachRolePolicy",
+ "iam:PutRolePolicy",
+ "iam:TagRole",
+ "iam:TagInstanceProfile"
+ ],
+ "Resource": [
+ "arn:aws:iam::*:role/TowerForge-*",
+ "arn:aws:iam::*:instance-profile/TowerForge-*"
+ ]
+ },
+ {
+ "Sid": "AwsCloudCreatePassRole",
+ "Effect": "Allow",
+ "Action": [
+ "iam:PassRole"
+ ],
+ "Resource": "arn:aws:iam::*:role/TowerForge-*"
+ },
+ {
+ "Sid": "AwsCloudLaunchEC2",
+ "Effect": "Allow",
+ "Action": [
+ "ec2:CreateTags",
+ "ec2:DeleteTags",
+ "ec2:DescribeInstances",
+ "ec2:RunInstances",
+ "ec2:TerminateInstances"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "AwsCloudLaunchLogs",
+ "Effect": "Allow",
+ "Action": [
+ "logs:GetLogEvents"
+ ],
+ "Resource": "arn:aws:logs:*:*:log-group:*:log-stream:*"
+ },
+ {
+ "Sid": "AwsCloudLaunchS3",
+ "Effect": "Allow",
+ "Action": [
+ "s3:GetObject"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "AwsCloudDelete",
+ "Effect": "Allow",
+ "Action": [
+ "iam:GetRole",
+ "iam:ListAttachedRolePolicies",
+ "iam:ListRolePolicies",
+ "iam:DeleteRole",
+ "iam:DeleteInstanceProfile",
+ "iam:RemoveRoleFromInstanceProfile",
+ "iam:DetachRolePolicy",
+ "iam:DeleteRolePolicy"
+ ],
+ "Resource": [
+ "arn:aws:iam::*:role/TowerForge-*",
+ "arn:aws:iam::*:instance-profile/TowerForge-*"
+ ]
+ },
+ {
+ "Sid": "AwsCloudRead",
+ "Effect": "Allow",
+ "Action": [
+ "ec2:DescribeInstanceTypes",
+ "ec2:DescribeKeyPairs",
+ "ec2:DescribeVpcs",
+ "ec2:DescribeImages",
+ "ec2:DescribeSubnets",
+ "ec2:DescribeSecurityGroups",
+ "s3:ListAllMyBuckets"
+ ],
+ "Resource": "*"
+ }
+ ]
+}
+```
+
+
+
+### Compute environment creation
+
+Provision the IAM role that the EC2 instance will assume:
+
+```json
+{
+ "Sid": "AwsCloudCreate",
+ "Effect": "Allow",
+ "Action": [
+ "iam:CreateRole",
+ "iam:AddRoleToInstanceProfile",
+ "iam:CreateInstanceProfile",
+ "iam:AttachRolePolicy",
+ "iam:PutRolePolicy",
+ "iam:TagRole",
+ "iam:TagInstanceProfile"
+ ],
+ "Resource": [
+ "arn:aws:iam::*:role/TowerForge-*",
+ "arn:aws:iam::*:instance-profile/TowerForge-*"
+ ]
+},
+{
+ "Sid": "AwsCloudCreatePassRole",
+ "Effect": "Allow",
+ "Action": [
+ "iam:PassRole"
+ ],
+ "Resource": "arn:aws:iam::*:role/TowerForge-*"
+}
+```
+
+### Compute environment validation
+
+Validate ARNs at creation time:
+
+```json
+{
+ "Sid": "AwsCloudValidate",
+ "Effect": "Allow",
+ "Action": [
+ "ec2:DescribeInstanceTypes",
+ "ec2:DescribeImages",
+ "ec2:DescribeSubnets",
+ "ec2:DescribeSecurityGroups"
+ ],
+ "Resource": "*"
+}
+```
+
+### Pipeline and Studio session management
+
+Launch pipelines and Studios, fetch live logs from CloudWatch, download logs from S3, and stop execution:
+
+```json
+{
+ "Sid": "AwsCloudLaunchEC2",
+ "Effect": "Allow",
+ "Action": [
+ "ec2:CreateTags",
+ "ec2:DeleteTags",
+ "ec2:DescribeInstances",
+ "ec2:RunInstances",
+ "ec2:TerminateInstances"
+ ],
+ "Resource": "*"
+},
+{
+ "Sid": "AwsCloudLaunchLogs",
+ "Effect": "Allow",
+ "Action": [
+ "logs:GetLogEvents"
+ ],
+ "Resource": "arn:aws:logs:*:*:log-group:*:log-stream:*"
+},
+{
+ "Sid": "AwsCloudLaunchS3",
+ "Effect": "Allow",
+ "Action": [
+ "s3:GetObject"
+ ],
+  "Resource": "arn:aws:s3:::<WORK_DIRECTORY_BUCKET>/WORKDIR/*"
+}
+```
+
+### Compute environment termination and resource disposal
+
+Remove resources created by Seqera when the compute environment is deleted:
+
+```json
+{
+ "Sid": "AwsCloudDelete",
+ "Effect": "Allow",
+ "Action": [
+ "iam:GetRole",
+ "iam:ListAttachedRolePolicies",
+ "iam:ListRolePolicies",
+ "iam:DeleteRole",
+ "iam:DeleteInstanceProfile",
+ "iam:RemoveRoleFromInstanceProfile",
+ "iam:DetachRolePolicy",
+ "iam:DeleteRolePolicy"
+ ],
+ "Resource": [
+ "arn:aws:iam::*:role/TowerForge-*",
+ "arn:aws:iam::*:instance-profile/TowerForge-*"
+ ]
+}
+```
+
+### Optional permissions
+
+Populate dropdown values in the Platform UI. If these permissions are missing, the fields will not auto-populate, but values can still be entered manually.
+
+```json
+{
+ "Sid": "AwsCloudRead",
+ "Effect": "Allow",
+ "Action": [
+ "ec2:DescribeInstanceTypes",
+ "ec2:DescribeKeyPairs",
+ "ec2:DescribeVpcs",
+ "ec2:DescribeImages",
+ "ec2:DescribeSubnets",
+ "ec2:DescribeSecurityGroups",
+ "s3:ListAllMyBuckets"
+ ],
+ "Resource": "*"
+}
+```
+
+
+
+
+
+### Full permissive policy (EKS)
+
+For Amazon EKS, Seqera Platform requires permissions to list and describe clusters. Cluster operations are performed through Kubernetes RBAC by the Service Account, not through the IAM user — see [EKS additions](./eks-additions) for RBAC setup.
+
+
+Full permissive policy (for reference)
+
+```json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "EKSClusterAccessCanBeRestricted",
+ "Effect": "Allow",
+ "Action": [
+ "eks:DescribeCluster",
+ "eks:ListClusters"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "OptionalS3PlatformDataAccessCanBeRestricted",
+ "Effect": "Allow",
+ "Action": [
+ "s3:Get*",
+ "s3:List*",
+ "s3:PutObject",
+ "s3:PutObjectTagging",
+ "s3:DeleteObject"
+ ],
+ "Resource": "*"
+ }
+ ]
+}
+```
+
+
+
+### EKS cluster access
+
+`eks:ListClusters` cannot be restricted to specific resources, but `eks:DescribeCluster` can be restricted to the cluster used as compute environment.
+
+```json
+{
+ "Sid": "EKSClusterListing",
+ "Effect": "Allow",
+ "Action": [
+ "eks:ListClusters"
+ ],
+ "Resource": "*"
+},
+{
+ "Sid": "EKSClusterDescription",
+ "Effect": "Allow",
+ "Action": [
+ "eks:DescribeCluster"
+ ],
+  "Resource": "arn:aws:eks:<REGION>:<ACCOUNT_ID>:cluster/<CLUSTER_NAME>"
+}
+```
+
+No other IAM user permissions are required to launch pipelines on EKS. The Service Account performs the management of pods and resources, and the IAM user accesses it via [EKS authentication](./eks-additions#allow-an-iam-user-or-role-access-to-eks).
+
+
+
+
+## S3 access (optional)
+
+Seqera can fetch a list of S3 buckets to populate dropdown menus during compute environment creation. This is optional — users can enter bucket names manually. To enable bucket listing, add `s3:ListAllMyBuckets` with `Resource: *`.
+
+[Studios](/platform-cloud/studios/overview) and [Data Explorer](/platform-cloud/data/data-explorer) require additional S3 permissions; if you don't use those features, omit them.
+
+You can scope down to limited read/write on specific buckets, plus permission to check the region and list the work-directory bucket. We recommend granting `s3:GetObject` on the work directory path to fetch Nextflow log files.
+
+:::note
+If your S3 bucket is dedicated to Nextflow work directories, the IAM user does not need read/write access — Forge-created IAM roles will have the necessary permissions. If you set up the compute environment manually, see [AWS Batch manual setup](./manual-setup) for IAM role creation.
+:::
+
+```json
+{
+ "Sid": "S3CheckBucketWorkDirectory",
+ "Effect": "Allow",
+ "Action": [
+ "s3:GetBucketLocation",
+ "s3:ListBucket"
+ ],
+ "Resource": [
+ "arn:aws:s3:::example-bucket-used-as-work-directory"
+ ]
+},
+{
+ "Sid": "S3ReadOnlyNextflowLogFiles",
+ "Effect": "Allow",
+ "Action": [
+ "s3:GetObject"
+ ],
+ "Resource": [
+ "arn:aws:s3:::example-bucket-used-as-work-directory/path/to/work/directory/*"
+ ]
+},
+{
+ "Sid": "S3ReadWriteBucketsForStudiosDataExplorer",
+ "Effect": "Allow",
+ "Action": [
+ "s3:Get*",
+ "s3:List*",
+ "s3:PutObject"
+ ],
+ "Resource": [
+ "arn:aws:s3:::example-bucket-read-write-studios",
+ "arn:aws:s3:::example-bucket-read-write-studios/*",
+ "arn:aws:s3:::example-bucket-read-write-data-explorer",
+ "arn:aws:s3:::example-bucket-read-write-data-explorer/*"
+ ]
+}
+```
+
+## Next steps
+
+- [Create the IAM policy, IAM user, and IAM role](./credentials) to attach these permissions in AWS.
+- [Set up data access (S3, EFS, FSx)](./data-access) for your work directory.
+- For Amazon EKS, also configure [Kubernetes RBAC and the Service Account IAM role](./eks-additions).
+- For AWS Batch with manually-managed resources, see [AWS Batch manual setup](./manual-setup).
diff --git a/platform-cloud/docs/enterprise/advanced-topics/manual-aws-batch-setup.mdx b/platform-cloud/docs/integrations/cloud-providers/aws/manual-setup.mdx
similarity index 97%
rename from platform-cloud/docs/enterprise/advanced-topics/manual-aws-batch-setup.mdx
rename to platform-cloud/docs/integrations/cloud-providers/aws/manual-setup.mdx
index f59c31ef6..3cbdf2bad 100644
--- a/platform-cloud/docs/enterprise/advanced-topics/manual-aws-batch-setup.mdx
+++ b/platform-cloud/docs/integrations/cloud-providers/aws/manual-setup.mdx
@@ -334,7 +334,7 @@ Create a launch template to configure the EC2 instances deployed by Batch jobs:
### Create the Batch compute environments
:::caution
-AWS Graviton instances (ARM64 CPU architecture) are not supported in manual compute environments. To use Graviton instances, create your AWS Batch compute environment with [Batch Forge](../../compute-envs/aws-batch#create-a-seqera-aws-batch-compute-environment).
+AWS Graviton instances (ARM64 CPU architecture) are not supported in manual compute environments. To use Graviton instances, create your AWS Batch compute environment with [Batch Forge](/platform-cloud/compute-envs/aws-batch#create-a-seqera-aws-batch-compute-environment).
:::
Nextflow makes use of two job queues during workflow execution:
@@ -358,7 +358,7 @@ The head queue requires an on-demand compute environment. Do not select **Use Sp
1. In the [Batch Console](https://eu-west-1.console.aws.amazon.com/batch/home), select **Create** on the Compute environments page.
1. Select **Amazon EC2** as the compute environment configuration.
:::note
- Seqera AWS Batch compute environments created with [Batch Forge](../../compute-envs/aws-batch#create-a-seqera-aws-batch-compute-environment) support using Fargate for the head job, but manual compute environments must use EC2.
+ Seqera AWS Batch compute environments created with [Batch Forge](/platform-cloud/compute-envs/aws-batch#create-a-seqera-aws-batch-compute-environment) support using Fargate for the head job, but manual compute environments must use EC2.
:::
1. Enter a name of your choice, and apply the `seqera-servicerole` and `seqera-instancerole`.
1. When creating the Seqera compute environment, enter the ARN of `seqera-headjob-role` in the **Head Job role** field.
@@ -414,4 +414,4 @@ You only need to create one queue if you intend to use on-demand instances for y
-Use the AWS resources created on this page to create your [manual AWS Batch compute environment](../../compute-envs/aws-batch#manual-configuration-of-batch-resources).
+Use the AWS resources created on this page to create your [manual AWS Batch compute environment](/platform-cloud/compute-envs/aws-batch#manual-configuration-of-batch-resources).
diff --git a/platform-cloud/docs/integrations/cloud-providers/aws/overview.md b/platform-cloud/docs/integrations/cloud-providers/aws/overview.md
new file mode 100644
index 000000000..2873583ea
--- /dev/null
+++ b/platform-cloud/docs/integrations/cloud-providers/aws/overview.md
@@ -0,0 +1,37 @@
+---
+title: "AWS integration"
+description: "Connect Seqera Platform to AWS — IAM, credentials, data access, and EKS additions."
+tags: [aws, integration]
+---
+
+Seqera Platform integrates with AWS to run pipelines on AWS Batch, AWS Cloud, and Amazon EKS, and to read input/output data from S3, EFS, and FSx. This page is the entry point for the AWS-side setup.
+
+## Setup checklist
+
+| Step | What to do | Page |
+|---|---|---|
+| 1 | Create an IAM policy with the permissions Seqera needs | [IAM policies](./iam-policies) |
+| 2 | Create the IAM user (and optionally an IAM role) | [Credentials](./credentials) |
+| 3 | Create an S3 bucket — and optionally EFS or FSx — for the work directory | [Data access](./data-access) |
+| 4 | (EKS only) Set up the Service Account IAM role and `aws-auth` | [EKS additions](./eks-additions) |
+| 5 | (Manual Batch only) Create AWS Batch resources by hand | [AWS Batch manual setup](./manual-setup) |
+| 6 | Add the credentials to Seqera and create the compute environment | [AWS Batch](/platform-cloud/compute-envs/aws-batch), [AWS Cloud](/platform-cloud/compute-envs/aws-cloud), [Amazon EKS](/platform-cloud/compute-envs/eks) |
+
+## Choose a credential model
+
+Seqera supports two AWS credential modes:
+
+- **Key-based credentials** — Access key + secret key with direct IAM permissions. Optionally pair with **Assume role** to scope down via role assumption.
+- **Role-based credentials (recommended)** — Role assumption only, no static keys. Seqera generates an External ID automatically.
+
+For Seqera Cloud, role-based credentials let you allow only the Seqera Cloud access role `arn:aws:iam::161471496260:role/SeqeraPlatformCloudAccessRole` in your trust policy. See [Role-based trust policy example](./credentials#role-based-trust-policy-example-seqera-cloud).
+
+## Choose a compute environment type
+
+| Compute environment | When to use it | IAM scope |
+|---|---|---|
+| **AWS Batch** | Production-scale pipelines, mature workloads | Largest IAM surface (Batch + EC2 + IAM + optional services) |
+| **AWS Cloud** (preview) | Studios and small/medium pipelines, simplified setup | Smallest IAM surface (single role) |
+| **Amazon EKS** | Pipelines on existing Kubernetes clusters | EKS describe + Kubernetes RBAC |
+
+The IAM policy you create depends on which compute environment(s) you plan to use. See the tabbed policy in [IAM policies](./iam-policies).
diff --git a/platform-cloud/docs/integrations/cloud-providers/azure/credentials.md b/platform-cloud/docs/integrations/cloud-providers/azure/credentials.md
new file mode 100644
index 000000000..81613537e
--- /dev/null
+++ b/platform-cloud/docs/integrations/cloud-providers/azure/credentials.md
@@ -0,0 +1,71 @@
+---
+title: "Azure credentials"
+description: "Add Azure access keys or Entra service principal credentials to Seqera Platform."
+tags: [azure, credentials, entra, integration]
+---
+
+Once you have an Azure Storage account, a Batch account (if using Azure Batch), and either access keys or a registered Entra application, add the credentials to Seqera Platform.
+
+## Credential types
+
+### Access keys
+
+Access keys are simple but limited:
+
+- Long-lived.
+- Provide full access to the Storage and Batch accounts.
+- Azure allows only two access keys per account.
+- Do not support VNet/subnet configuration.
+
+To create an access key credential:
+
+1. In the Azure Portal, locate your Azure Batch account and select **Keys** under **Account management**. Copy a Primary or Secondary key.
+1. Locate your Azure Storage account and select **Access keys** under **Security and Networking**. Copy a Key1 or Key2 value.
+1. In your Platform workspace **Credentials** tab, select **Add credentials** and complete:
+ - **Name**
+ - **Provider**: Azure
+ - Select the **Shared key** tab
+ - **Batch account** and **Blob Storage account** names and access keys
+1. Delete the keys from their temporary location once they have been added to a credential in Platform.
+
+### Entra service principal (recommended)
+
+Entra service principals support role-based access control with precise permissions, work across multiple Batch and Storage accounts, and unlock features such as VNet/subnet configuration.
+
+You must first complete [Roles & service principals](./roles-and-service-principals): create a custom role (Azure Cloud) or assign built-in roles (Azure Batch), register the application, and assign the role to the service principal.
+
+To add the credentials to Seqera:
+
+1. In your Platform workspace **Credentials** tab, select **Add credentials** and complete:
+ - **Name**
+ - **Provider**: Azure
+ - Select the **Entra** tab
+ - **Subscription ID**: your Azure subscription ID
+ - **Tenant ID**: your Directory (tenant) ID
+ - **Client ID**: your Application (client) ID
+ - **Client secret**: your client secret value
+ - **Batch account name** (Azure Batch only)
+ - **Blob Storage account name**
+1. Delete the values from their temporary location once added to a credential in Platform.
+
+### Managed identity (Azure Batch)
+
+To use a managed identity, Seqera requires Nextflow version 24.06.0-edge or later.
+
+Nextflow can authenticate to Azure services using a managed identity attached to the Azure Batch pool. Seqera still needs access keys or an Entra service principal to submit the initial task to Azure Batch; Nextflow then uses the managed identity for subsequent authentication.
+
+1. In Azure, create a user-assigned managed identity. See [Manage user-assigned managed identities](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). Note both the **client ID** and the **resource ID**.
+1. The managed identity must have the necessary access roles for Nextflow. See [Required role assignments](https://docs.seqera.io/nextflow/azure#required-role-assignments).
+1. Associate the managed identity with the Azure Batch Pool. See [Set up managed identity in your Batch pool](https://learn.microsoft.com/en-us/troubleshoot/azure/hpc/batch/use-managed-identities-azure-batch-account-pool#set-up-managed-identity-in-your-batch-pool).
+1. When you set up the Seqera compute environment, select the Azure Batch pool by name and enter the managed identity **client ID** and (optionally) the **resource ID**.
+
+When you submit a pipeline, Nextflow authenticates using the managed identity associated with the Azure Batch node, rather than relying on access keys.
+
+:::caution
+If a managed identity is misconfigured (e.g., invalid client ID or missing RBAC roles), the pipeline fails with an explicit error. Seqera does not silently fall back to access key authentication.
+:::
+
+## Next steps
+
+- Create the [Azure Batch](/platform-cloud/compute-envs/azure-batch) or [Azure Cloud](/platform-cloud/compute-envs/azure-cloud) compute environment.
+- For manually-managed Azure Batch pools, see [Azure Batch manual setup](./manual-setup).
diff --git a/platform-cloud/docs/enterprise/advanced-topics/manual-azure-batch-setup.md b/platform-cloud/docs/integrations/cloud-providers/azure/manual-setup.md
similarity index 97%
rename from platform-cloud/docs/enterprise/advanced-topics/manual-azure-batch-setup.md
rename to platform-cloud/docs/integrations/cloud-providers/azure/manual-setup.md
index 6a234a4e2..8e5b65bc9 100644
--- a/platform-cloud/docs/enterprise/advanced-topics/manual-azure-batch-setup.md
+++ b/platform-cloud/docs/integrations/cloud-providers/azure/manual-setup.md
@@ -275,7 +275,7 @@ See [Microsoft Entra](https://docs.seqera.io/nextflow/azure#microsoft-entra) in
1. [Create an Azure service principal](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal).
1. [Assign roles to the service principal](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current).
1. [Get the Service Principal ID, Tenant ID, and Client Secret](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#option-3-create-a-new-client-secret).
-1. [Add to Seqera credentials](../../compute-envs/azure-batch.md#entra-service-principal-and-managed-identity)
+1. [Add to Seqera credentials](/platform-cloud/integrations/cloud-providers/azure/credentials)
In Seqera:
@@ -336,7 +336,7 @@ You can also use User Subscription mode instead of Batch Managed here, but this
It is common to attach Azure Batch pools to a virtual network. This is useful to connect to other resources in the same VNet or place things behind enhanced security. Seqera Platform does not support this feature directly, so you must manually create an Azure Batch pool.
-See [Create a Nextflow-compatible Azure Batch pool](../../compute-envs/azure-batch#create-a-nextflow-compatible-azure-batch-pool) to create an Azure Batch pool manually that is compatible with Seqera and Nextflow. Use the following settings:
+See [Create a Nextflow-compatible Azure Batch pool](/platform-cloud/compute-envs/azure-batch#create-a-nextflow-compatible-azure-batch-pool) to create an Azure Batch pool manually that is compatible with Seqera and Nextflow. Use the following settings:
- Name & ID: `3-azure-batch-vnet`
- Add the managed identity created earlier as a user-assigned managed identity.
@@ -430,5 +430,5 @@ If you wish to keep the Azure resources, you can remove each pool within a Batch
[install-azure-cli]: https://learn.microsoft.com/en-us/cli/azure/install-azure-cli
[install-seqera-cli]: /platform-cli/installation
[nextflow-working-directory]: https://docs.seqera.io/nextflow/cache-and-resume#work-directory
-[create-org-workspace]: ../../getting-started/workspace-setup
-[add-pipeline]: ../../getting-started/quickstart-demo/add-pipelines#add-from-the-launchpad
+[create-org-workspace]: /platform-cloud/getting-started/workspace-setup
+[add-pipeline]: /platform-cloud/getting-started/quickstart-demo/add-pipelines#add-from-the-launchpad
diff --git a/platform-cloud/docs/integrations/cloud-providers/azure/overview.md b/platform-cloud/docs/integrations/cloud-providers/azure/overview.md
new file mode 100644
index 000000000..be6d70d31
--- /dev/null
+++ b/platform-cloud/docs/integrations/cloud-providers/azure/overview.md
@@ -0,0 +1,36 @@
+---
+title: "Azure integration"
+description: "Connect Seqera Platform to Azure — roles, service principals, credentials, and storage."
+tags: [azure, integration]
+---
+
+Seqera Platform integrates with Azure to run pipelines on Azure Batch and Azure Cloud, and to read input/output data from Azure Blob Storage. This page is the entry point for the Azure-side setup.
+
+## Setup checklist
+
+| Step | What to do | Page |
+|---|---|---|
+| 1 | Create a custom role and register an Entra application (service principal) | [Roles & service principals](./roles-and-service-principals) |
+| 2 | Create or identify a Storage account and Blob container for the work directory | [Storage access](./storage-access) |
+| 3 | (Azure Batch only) Create a Batch account and decide on credential type | [Roles & service principals](./roles-and-service-principals) |
+| 4 | Add the credentials to Seqera | [Credentials](./credentials) |
+| 5 | (Manual Batch only) Create Azure Batch pools and resources by hand | [Azure Batch manual setup](./manual-setup) |
+| 6 | Create the compute environment | [Azure Batch](/platform-cloud/compute-envs/azure-batch) or [Azure Cloud](/platform-cloud/compute-envs/azure-cloud) |
+
+## Choose a credential type
+
+Azure supports two credential types:
+
+- **Access keys** — Long-lived keys for the Storage and Batch accounts. Simple but limited (no VNet/subnet, two keys per account, broad access).
+- **Entra service principals (recommended)** — Role-based access control via a registered application in Microsoft Entra ID. Required for VNet/subnet configuration. Optionally pair with a managed identity attached to the Batch pool for the most secure setup.
+
+Both credential types support Batch Forge and Manual compute environments.
+
+## Choose a compute environment type
+
+| Compute environment | When to use it | Credential model |
+|---|---|---|
+| **Azure Batch** | Production-scale pipelines | Access keys or Entra service principal |
+| **Azure Cloud** (preview) | Studios and small/medium pipelines, simplified setup | Entra service principal only |
+
+For Azure Cloud, see the dedicated custom role JSON in [Roles & service principals](./roles-and-service-principals).
diff --git a/platform-cloud/docs/integrations/cloud-providers/azure/roles-and-service-principals.md b/platform-cloud/docs/integrations/cloud-providers/azure/roles-and-service-principals.md
new file mode 100644
index 000000000..a8dfbea91
--- /dev/null
+++ b/platform-cloud/docs/integrations/cloud-providers/azure/roles-and-service-principals.md
@@ -0,0 +1,353 @@
+---
+title: "Azure roles & service principals"
+description: "Custom roles, app registration, and role assignment for Seqera Platform on Azure."
+tags: [azure, role, service-principal, entra, integration]
+---
+
+Use [Azure custom roles](https://learn.microsoft.com/en-us/azure/role-based-access-control/custom-roles) for granular control over the permissions granted to Seqera, and assign them to a service principal that Seqera authenticates as.
+
+This page covers:
+
+- The full custom role JSON for the Azure Cloud compute environment, broken down by purpose.
+- Role assignments for Azure Batch.
+- Application registration and role assignment in the Azure Portal.
+
+## Azure Cloud custom role
+
+The full role JSON definition for Azure Cloud:
+
+```json
+{
+ "properties": {
+ "roleName": "seqera-azure-cloud",
+ "description": "Role assumed by Seqera Platform to create Azure Cloud compute environments",
+ "assignableScopes": [
+      "/subscriptions/<SUBSCRIPTION_ID>"
+ ],
+ "permissions": [
+ {
+ "actions": [
+ "Microsoft.Compute/virtualMachines/read",
+ "Microsoft.Compute/virtualMachines/write",
+ "Microsoft.Compute/virtualMachines/delete",
+ "Microsoft.Compute/virtualMachines/deallocate/action",
+ "Microsoft.Compute/virtualMachines/attachDetachDataDisks/action",
+
+ "Microsoft.Resources/subscriptions/resourceGroups/write",
+ "Microsoft.Resources/subscriptions/resourceGroups/read",
+ "Microsoft.Resources/subscriptions/resourceGroups/delete",
+
+ "Microsoft.Network/publicIPAddresses/read",
+ "Microsoft.Network/publicIPAddresses/write",
+ "Microsoft.Network/publicIPAddresses/delete",
+ "Microsoft.Network/publicIPAddresses/join/action",
+
+ "Microsoft.Network/virtualNetworks/read",
+ "Microsoft.Network/virtualNetworks/write",
+ "Microsoft.Network/virtualNetworks/delete",
+ "Microsoft.Network/virtualNetworks/subnets/read",
+ "Microsoft.Network/virtualNetworks/subnets/write",
+ "Microsoft.Network/virtualNetworks/subnets/delete",
+ "Microsoft.Network/virtualNetworks/subnets/join/action",
+
+ "Microsoft.Network/networkInterfaces/delete",
+ "Microsoft.Network/networkInterfaces/write",
+ "Microsoft.Network/networkInterfaces/read",
+ "Microsoft.Network/networkInterfaces/join/action",
+
+ "Microsoft.ManagedIdentity/userAssignedIdentities/read",
+ "Microsoft.ManagedIdentity/userAssignedIdentities/write",
+ "Microsoft.ManagedIdentity/userAssignedIdentities/delete",
+ "Microsoft.ManagedIdentity/userAssignedIdentities/assign/action",
+
+ "Microsoft.Authorization/roleAssignments/read",
+ "Microsoft.Authorization/roleAssignments/write",
+ "Microsoft.Authorization/roleAssignments/delete",
+
+ "Microsoft.Authorization/roleDefinitions/read",
+ "Microsoft.Authorization/roleDefinitions/write",
+ "Microsoft.Authorization/roleDefinitions/delete",
+
+ "Microsoft.Insights/DataCollectionRules/Read",
+ "Microsoft.Insights/DataCollectionRules/Write",
+ "Microsoft.Insights/DataCollectionRules/Delete",
+
+ "Microsoft.Insights/DataCollectionEndpoints/Write",
+ "Microsoft.Insights/DataCollectionEndpoints/Delete",
+
+ "Microsoft.OperationalInsights/workspaces/write",
+ "Microsoft.OperationalInsights/workspaces/read",
+ "Microsoft.OperationalInsights/workspaces/delete",
+ "Microsoft.OperationalInsights/workspaces/sharedkeys/action",
+ "Microsoft.OperationalInsights/workspaces/tables/read",
+ "Microsoft.OperationalInsights/workspaces/tables/write",
+ "Microsoft.OperationalInsights/workspaces/tables/delete",
+ "Microsoft.OperationalInsights/workspaces/query/read",
+ "Microsoft.OperationalInsights/workspaces/query/Tables.Custom/read",
+
+ "Microsoft.Storage/storageAccounts/blobServices/containers/read",
+ "Microsoft.Storage/storageAccounts/blobServices/generateUserDelegationKey/action"
+ ],
+ "notActions": [],
+ "dataActions": [
+ "Microsoft.Insights/Telemetry/Write",
+ "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read",
+ "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/tags/read",
+ "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write",
+ "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/tags/write",
+ "Microsoft.OperationalInsights/workspaces/tables/data/read"
+ ],
+ "notDataActions": []
+ }
+ ]
+ }
+}
+```
+
+See [Start from JSON](https://learn.microsoft.com/en-us/azure/role-based-access-control/custom-roles-portal#start-from-json) to create this custom role in the Azure Portal.
+
+This role can be applied as-is for convenience, or broken down into the smaller roles below.
+
+### Compute environment creation
+
+Provision resources in the Azure account when first creating the compute environment:
+
+```json
+{
+ "properties": {
+ "roleName": "seqera-azure-cloud-create",
+ "description": "Role assumed by Seqera Platform to create Azure Cloud compute environments",
+ "assignableScopes": [
+      "/subscriptions/<SUBSCRIPTION_ID>"
+ ],
+ "permissions": [
+ {
+ "actions": [
+ "Microsoft.Resources/subscriptions/resourceGroups/read",
+ "Microsoft.Resources/subscriptions/resourceGroups/write",
+
+ "Microsoft.Storage/storageAccounts/blobServices/containers/read",
+ "Microsoft.Network/virtualNetworks/read",
+ "Microsoft.Network/virtualNetworks/write",
+
+ "Microsoft.Network/virtualNetworks/subnets/read",
+ "Microsoft.Network/virtualNetworks/subnets/write",
+
+ "Microsoft.ManagedIdentity/userAssignedIdentities/read",
+ "Microsoft.ManagedIdentity/userAssignedIdentities/write",
+
+ "Microsoft.Authorization/roleAssignments/read",
+ "Microsoft.Authorization/roleAssignments/write",
+
+ "Microsoft.Authorization/roleDefinitions/read",
+ "Microsoft.Authorization/roleDefinitions/write",
+
+ "Microsoft.Insights/DataCollectionRules/Read",
+ "Microsoft.Insights/DataCollectionRules/Write",
+
+ "Microsoft.Insights/DataCollectionEndpoints/Write",
+
+ "Microsoft.OperationalInsights/workspaces/read",
+ "Microsoft.OperationalInsights/workspaces/write",
+ "Microsoft.OperationalInsights/workspaces/tables/write"
+ ],
+ "notActions": [],
+ "dataActions": [],
+ "notDataActions": []
+ }
+ ]
+ }
+}
+```
+
+### Pipeline and Studio launch
+
+Launch pipelines and Studios:
+
+```json
+{
+ "properties": {
+ "roleName": "seqera-azure-cloud-launch",
+ "description": "Role assumed by Seqera Platform to launch Studios and pipelines on Azure Cloud compute environments",
+ "assignableScopes": [
+      "/subscriptions/<SUBSCRIPTION_ID>"
+ ],
+ "permissions": [
+ {
+ "actions": [
+ "Microsoft.Compute/virtualMachines/read",
+ "Microsoft.Compute/virtualMachines/write",
+ "Microsoft.Compute/virtualMachines/delete",
+ "Microsoft.Compute/virtualMachines/deallocate/action",
+ "Microsoft.Compute/virtualMachines/attachDetachDataDisks/action",
+
+ "Microsoft.Network/publicIPAddresses/read",
+ "Microsoft.Network/publicIPAddresses/write",
+ "Microsoft.Network/publicIPAddresses/delete",
+ "Microsoft.Network/publicIPAddresses/join/action",
+
+ "Microsoft.Network/networkInterfaces/read",
+ "Microsoft.Network/networkInterfaces/write",
+ "Microsoft.Network/networkInterfaces/join/action",
+ "Microsoft.Network/virtualNetworks/subnets/join/action",
+
+ "Microsoft.ManagedIdentity/userAssignedIdentities/assign/action",
+
+ "Microsoft.Insights/DataCollectionRules/Write",
+ "Microsoft.Insights/DataCollectionEndpoints/Write"
+ ],
+ "notActions": [],
+ "dataActions": [
+ "Microsoft.Insights/Telemetry/Write"
+ ],
+ "notDataActions": []
+ }
+ ]
+ }
+}
+```
+
+### Live stream log fetching
+
+Fetch logs while a task is running:
+
+```json
+{
+ "properties": {
+ "roleName": "seqera-azure-cloud-logs",
+ "description": "Role to be assumed by Seqera Platform to read live-streamed logs for Azure Cloud compute environments",
+ "assignableScopes": [
+      "/subscriptions/<SUBSCRIPTION_ID>"
+ ],
+ "permissions": [
+ {
+ "actions": [
+ "Microsoft.OperationalInsights/workspaces/query/read",
+ "Microsoft.OperationalInsights/workspaces/query/Tables.Custom/read"
+ ],
+ "notActions": [],
+ "dataActions": [
+ "Microsoft.OperationalInsights/workspaces/tables/data/read"
+ ],
+ "notDataActions": []
+ }
+ ]
+ }
+}
+```
+
+### Data Explorer data-links
+
+Work with [Data Explorer](/platform-cloud/data/data-explorer) data-links on Azure:
+
+```json
+{
+ "properties": {
+ "roleName": "seqera-azure-cloud-data-links",
+ "description": "Role assumed by Seqera Platform to access data-links in Azure Cloud compute environments",
+ "assignableScopes": [
+      "/subscriptions/<SUBSCRIPTION_ID>"
+ ],
+ "permissions": [
+ {
+ "actions": [
+ "Microsoft.Storage/storageAccounts/blobServices/containers/read",
+ "Microsoft.Storage/storageAccounts/blobServices/generateUserDelegationKey/action"
+ ],
+ "notActions": [],
+ "dataActions": [
+ "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read",
+ "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/tags/read",
+ "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write",
+ "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/tags/write"
+ ],
+ "notDataActions": []
+ }
+ ]
+ }
+}
+```
+
+### Compute environment termination and resource disposal
+
+Delete the resources created for the compute environment:
+
+```json
+{
+ "properties": {
+ "roleName": "seqera-azure-cloud-dispose",
+ "description": "Role assumed by Seqera Platform to delete Azure Cloud compute environment resources",
+ "assignableScopes": [
+      "/subscriptions/<SUBSCRIPTION_ID>"
+ ],
+ "permissions": [
+ {
+ "actions": [
+ "Microsoft.Resources/subscriptions/resourceGroups/delete",
+ "Microsoft.Network/publicIPAddresses/delete",
+ "Microsoft.Network/virtualNetworks/delete",
+ "Microsoft.Network/virtualNetworks/subnets/delete",
+ "Microsoft.Network/networkInterfaces/delete",
+ "Microsoft.ManagedIdentity/userAssignedIdentities/delete",
+ "Microsoft.Authorization/roleAssignments/delete",
+ "Microsoft.Authorization/roleDefinitions/delete",
+ "Microsoft.Insights/DataCollectionRules/Delete",
+ "Microsoft.Insights/DataCollectionEndpoints/Delete",
+ "Microsoft.OperationalInsights/workspaces/delete",
+ "Microsoft.OperationalInsights/workspaces/tables/delete"
+ ],
+ "notActions": [],
+ "dataActions": [],
+ "notDataActions": []
+ }
+ ]
+ }
+}
+```
+
+## Azure Batch role assignments
+
+For Azure Batch (rather than Azure Cloud), no custom role is required. Assign these built-in roles to the service principal:
+
+- On the Storage account: **Storage Blob Data Reader** and **Storage Blob Data Contributor**.
+- On the Batch account: **Azure Batch Data Contributor**.
+
+## Create a custom role in Microsoft Entra
+
+1. Save the relevant permissions from the preceding sections to a local JSON file. Replace `<SUBSCRIPTION_ID>` in the `assignableScopes` field with your Azure subscription ID.
+1. In the Azure Portal, go to **Subscriptions** and select your subscription.
+1. Select **Access control (IAM)**, then **Add** in the **Create a custom role** section.
+1. Provide:
+ - **Custom role name**: e.g., `seqera-azure-cloud`
+ - **Description**: e.g., `Role for Seqera Platform to manage Azure Cloud compute environments`
+ - **Baseline permissions**: Select **Start from JSON**
+ - **File**: Select the local JSON file you saved earlier.
+1. Select **Next**, review the permissions and assignable scope, then **Next** again.
+1. Select **Create** to save the role.
+
+## Register an application in Microsoft Entra ID
+
+Create an application that Seqera authenticates as:
+
+1. In the Azure Portal, go to **App registrations** and select **New registration**.
+1. Give the app a descriptive name (e.g., `SeqeraPlatformApp`).
+1. Select **Single tenant** for supported account types.
+1. Under **Certificates & secrets**, select **New client secret** with a description (e.g., `SeqeraPlatformSecret`) and an expiration matching your security policy. Select **Add**.
+1. Copy and save:
+ - **Application (client) ID** — your Client ID
+ - **Directory (tenant) ID** — your Tenant ID
+ - The client secret value (shown once)
+
+## Assign the custom role to the service principal
+
+1. In the Azure Portal, go to **Subscriptions** and select your subscription. Select **Access control (IAM)**, then **Add role assignment** in the **Grant access to this resource** section.
+1. Select the **Privileged administrator roles** tab and select the role you created earlier, then **Next**.
+1. Choose **Select members** and search for the application name (`SeqeraPlatformApp`). Select **Select**, then **Next**.
+1. Under **What user can do**, select **Allow user to assign all roles except privileged administrator roles Owner, UAA, RBAC (Recommended)**, then **Next**.
+1. Select **Review + assign**.
+
+## Next steps
+
+- [Add Azure credentials to Seqera Platform](./credentials).
+- [Configure Storage account and Blob container access](./storage-access).
+- For Azure Batch with manually-managed pools, see [Azure Batch manual setup](./manual-setup).
diff --git a/platform-cloud/docs/integrations/cloud-providers/azure/storage-access.md b/platform-cloud/docs/integrations/cloud-providers/azure/storage-access.md
new file mode 100644
index 000000000..1beb704e7
--- /dev/null
+++ b/platform-cloud/docs/integrations/cloud-providers/azure/storage-access.md
@@ -0,0 +1,78 @@
+---
+title: "Azure storage access"
+description: "Storage account, Blob container, and Batch account prerequisites for Azure compute environments."
+tags: [azure, storage, blob, batch, integration]
+---
+
+Seqera Platform on Azure requires:
+
+- An **Azure resource group** to contain the resources.
+- An **Azure Storage account** with at least one Blob container for the Nextflow work directory.
+- An **Azure Batch account** (Azure Batch only).
+
+The IAM/role permissions to access these resources are documented in [Roles & service principals](./roles-and-service-principals).
+
+## Resource group
+
+Create a resource group to contain the Azure Batch and Azure Storage resources.
+
+:::note
+A resource group can also be created on the fly when creating an Azure Storage or Azure Batch account.
+:::
+
+1. Log in to the Azure Portal, go to [Create Resource group](https://portal.azure.com/#create/Microsoft.ResourceGroup), and select **Create new resource group**.
+1. Enter a name (e.g., `seqeracompute`).
+1. Choose the preferred region.
+1. Select **Review and Create**, then **Create**.
+
+## Storage account
+
+1. Log in to the Azure Portal, go to [Create storage account](https://portal.azure.com/#create/Microsoft.StorageAccount-ARM), and select **Create a storage account**.
+1. Enter a name (e.g., `seqeracomputestorage`).
+1. Choose the preferred region. This must be the same region as the Batch account.
+1. Platform supports all performance and redundancy settings — select what fits your use case.
+1. Select **Next: Advanced**.
+1. Enable **storage account key access**.
+1. Select **Next: Networking**.
+ - Enable public access from all networks. You can scope to specific virtual networks and IP addresses, but you cannot use Forge to create compute resources with that configuration. Disabling public access is not supported.
+1. Configure **Data protection** (all settings supported) and **Encryption** (Microsoft-managed keys only).
+1. Add tags as needed.
+1. Select **Review and Create**, then **Create**.
+1. Go to your new Storage account and select **+ Container** to create a Blob container (e.g., `seqeracomputestorage-container`).
+1. Save the access keys from the **Access Keys** section if you plan to use shared-key authentication.
+
+:::caution
+Blob container storage credentials are associated with the Batch pool configuration. Avoid changing these credentials in your Seqera instance after creating the compute environment.
+:::
+
+## Batch account (Azure Batch only)
+
+1. Log in to the Azure Portal and go to [Create a batch account](https://portal.azure.com/#create/Microsoft.BatchAccount).
+1. Select an existing resource group or create a new one.
+1. Enter a name (e.g., `seqeracomputebatch`).
+1. Choose the preferred region — must match the Storage account region.
+1. Select **Advanced**:
+ - **Pool allocation mode**: **Batch service**
+ - **Authentication mode**: **Shared Key**
+1. Configure **Networking** to allow Platform access.
+1. Select **Review and Create**, then **Create**.
+1. From your new Batch account, copy the **Access Keys** if you plan to use shared-key authentication.
+
+:::caution
+A newly-created Azure Batch account may not be entitled to create virtual machines without making a service request to Azure. See [Azure Batch service quotas and limits](https://docs.microsoft.com/en-us/azure/batch/batch-quota-limit#view-batch-quotas).
+:::
+
+## Quotas
+
+Check and increase Batch account quotas as needed:
+
+- **Active jobs and schedules**: Each Nextflow process requires an active Azure Batch job per pipeline while running. Set this to a high level. See [jobs in Azure Batch](https://learn.microsoft.com/en-us/azure/batch/jobs-and-tasks).
+- **Pools**: Each compute environment requires at least one pool. Batch Forge creates two pools by default (head + compute).
+- **Batch accounts per region per subscription**: Set this to the number of Batch accounts you need in each region of your subscription. One is usually enough.
+- **Total Dedicated vCPUs per VM series**: Increase by the number of required concurrent CPUs. Machines are charged per CPU minute, so a higher quota does not increase cost. We recommend the latest E-series for cost-effective bioinformatics workloads.
+
+## Next steps
+
+- [Create the custom role and register the application](./roles-and-service-principals).
+- [Add credentials to Seqera Platform](./credentials).
+- Create the [Azure Batch](/platform-cloud/compute-envs/azure-batch) or [Azure Cloud](/platform-cloud/compute-envs/azure-cloud) compute environment.
diff --git a/platform-cloud/docs/integrations/cloud-providers/google/credentials.md b/platform-cloud/docs/integrations/cloud-providers/google/credentials.md
new file mode 100644
index 000000000..11b1bf5af
--- /dev/null
+++ b/platform-cloud/docs/integrations/cloud-providers/google/credentials.md
@@ -0,0 +1,37 @@
+---
+title: "Google Cloud credentials"
+description: "Generate a service account JSON key and add Google Cloud credentials to Seqera Platform."
+tags: [gcp, google, credentials, integration]
+---
+
+Seqera authenticates to Google Cloud using a service account JSON key file. The same procedure applies to both Google Cloud Batch and Google Cloud compute environments.
+
+## Generate a service account JSON key
+
+After creating a [custom service account with the required IAM roles](./iam), generate a JSON key:
+
+1. In the Google Cloud navigation menu, select **IAM & Admin > Service Accounts**.
+1. Select the email address of the service account.
+
+ :::note
+ The Compute Engine default service account is not recommended for production due to its powerful permissions. To use a service account other than the Compute Engine default, specify the service account email address under **Advanced options** on the Seqera compute environment creation form.
+ :::
+
+1. Select **Keys > Add key > Create new key**.
+1. Select **JSON** as the key type.
+1. Select **Create**.
+
+A JSON file downloads to your computer. This file contains the credential needed for the Seqera compute environment.
+
+You can manage your key from the **Service Accounts** page.
+
+## Add credentials in Seqera
+
+1. From the **Credentials** drop-down on the compute environment creation form, select **+** to add new credentials.
+1. Enter a name (e.g., `Google Cloud Credentials`).
+1. Paste the contents of the JSON file in the **Service account key** field.
+
+## Next steps
+
+- Create the [Google Cloud Batch](/platform-cloud/compute-envs/google-cloud-batch) or [Google Cloud](/platform-cloud/compute-envs/google-cloud) compute environment.
+- [Set up Cloud Storage bucket access](./storage-access).
diff --git a/platform-cloud/docs/integrations/cloud-providers/google/iam.md b/platform-cloud/docs/integrations/cloud-providers/google/iam.md
new file mode 100644
index 000000000..ee6668851
--- /dev/null
+++ b/platform-cloud/docs/integrations/cloud-providers/google/iam.md
@@ -0,0 +1,78 @@
+---
+title: "Google Cloud IAM"
+description: "Project, APIs, and service account IAM roles for Seqera Platform on Google Cloud."
+tags: [gcp, google, iam, integration]
+---
+
+Seqera Platform requires a Google Cloud project with the right APIs enabled and a service account with the right IAM roles. This page covers both compute environment types: Google Cloud Batch and Google Cloud (preview).
+
+## Create a project
+
+Go to the [Google Project Selector page](https://console.cloud.google.com/projectselector2) and select an existing project, or select **Create project**.
+
+Enter a name (e.g., `tower-nf`). If you are part of an organization, the location will default to your organization.
+
+## Enable billing
+
+See [Modify a project's billing settings](https://cloud.google.com/billing/docs/how-to/modify-project) to enable billing in your Google Cloud account.
+
+## Enable APIs
+
+[Enable the following APIs](https://console.cloud.google.com/flows/enableapi?apiid=batch.googleapis.com%2Ccompute.googleapis.com%2Cstorage-api.googleapis.com) for your project:
+
+- Batch API
+- Compute Engine API
+- Cloud Storage API
+
+Or enable each manually:
+
+- [Batch API](https://console.cloud.google.com/marketplace/product/google/batch.googleapis.com)
+- [Compute Engine API](https://console.cloud.google.com/marketplace/product/google/compute.googleapis.com)
+- [Cloud Storage API](https://console.cloud.google.com/marketplace/product/google/storage-api.googleapis.com)
+
+## Service account permissions
+
+Seqera requires a service account with appropriate permissions to interact with Google Cloud resources. As an IAM user, you must have access to the service account that submits Batch jobs.
+
+:::caution
+By default, Google Cloud Batch uses the default Compute Engine service account, which is granted the Editor (`roles/editor`) role. While this is sufficient for Seqera, this role is not recommended for production. Control job access using a custom service account with only the permissions necessary for Seqera.
+:::
+
+### Google Cloud Batch permissions
+
+[Create a custom service account](https://cloud.google.com/iam/docs/service-accounts-create#creating) with at least:
+
+- Batch Agent Reporter (`roles/batch.agentReporter`) on the project
+- Batch Job Editor (`roles/batch.jobsEditor`) on the project
+- Logs Writer (`roles/logging.logWriter`) on the project (for jobs to generate Cloud Logging logs)
+- Logs Viewer (`roles/logging.logViewer`) on the project (to view and retrieve Cloud Logging logs)
+- Service Account User (`roles/iam.serviceAccountUser`)
+
+If your Google Cloud project does not require access restrictions on Cloud Storage buckets, grant project Storage Admin (`roles/storage.admin`) to your service account to simplify setup. To grant access only to specific buckets, add the service account as a principal on each bucket individually. See [Storage access](./storage-access).
+
+### Google Cloud permissions
+
+For the Google Cloud (preview) compute environment, the service account requires:
+
+- Compute Instance Admin (`roles/compute.instanceAdmin.v1`)
+- Project IAM Admin (`roles/resourcemanager.projectIamAdmin`)
+- Service Account Admin (`roles/iam.serviceAccountAdmin`)
+- Service Account User (`roles/iam.serviceAccountUser`)
+- Service Usage Consumer (`roles/serviceusage.serviceUsageConsumer`)
+
+If your Google Cloud project does not require access restrictions on Cloud Storage buckets, grant project Storage Admin (`roles/storage.admin`) to simplify setup.
+
+For each Google Cloud compute environment created in Seqera, a separate service account is created with the necessary permissions to launch pipelines and Studios.
+
+### User permissions
+
+Ask your Google Cloud administrator to grant you the following IAM user permissions to interact with the custom service account:
+
+- Batch Job Editor (`roles/batch.jobsEditor`) on the project
+- Service Account User (`roles/iam.serviceAccountUser`) on the job's service account
+- View Service Accounts (`roles/iam.serviceAccountViewer`) on the project
+
+## Next steps
+
+- [Generate a service account JSON key and add credentials in Seqera](./credentials).
+- [Create a Cloud Storage bucket](./storage-access) for the work directory.
diff --git a/platform-cloud/docs/integrations/cloud-providers/google/overview.md b/platform-cloud/docs/integrations/cloud-providers/google/overview.md
new file mode 100644
index 000000000..4f207f7e2
--- /dev/null
+++ b/platform-cloud/docs/integrations/cloud-providers/google/overview.md
@@ -0,0 +1,26 @@
+---
+title: "Google Cloud integration"
+description: "Connect Seqera Platform to Google Cloud — IAM, service accounts, credentials, and storage."
+tags: [gcp, google, integration]
+---
+
+Seqera Platform integrates with Google Cloud to run pipelines on Google Cloud Batch and Google Cloud (preview), and to read input/output data from Cloud Storage. This page is the entry point for the Google Cloud-side setup.
+
+## Setup checklist
+
+| Step | What to do | Page |
+|---|---|---|
+| 1 | Create a project, enable billing, and enable required APIs | [IAM](./iam) |
+| 2 | Create a custom service account with required permissions | [IAM](./iam) |
+| 3 | Create a Cloud Storage bucket for the work directory | [Storage access](./storage-access) |
+| 4 | Generate a service account JSON key and add credentials in Seqera | [Credentials](./credentials) |
+| 5 | Create the compute environment | [Google Cloud Batch](/platform-cloud/compute-envs/google-cloud-batch) or [Google Cloud](/platform-cloud/compute-envs/google-cloud) |
+
+## Choose a compute environment type
+
+| Compute environment | When to use it |
+|---|---|
+| **Google Cloud Batch** | Production-scale pipelines, mature Batch API workloads |
+| **Google Cloud** (preview) | Studios and small/medium pipelines, simplified setup, fewer GCP dependencies |
+
+For Google Cloud Batch, use the IAM roles in [IAM](./iam#google-cloud-batch-permissions). For Google Cloud, use the service account permissions in [IAM](./iam#google-cloud-permissions).
diff --git a/platform-cloud/docs/integrations/cloud-providers/google/storage-access.md b/platform-cloud/docs/integrations/cloud-providers/google/storage-access.md
new file mode 100644
index 000000000..1a3cdd35b
--- /dev/null
+++ b/platform-cloud/docs/integrations/cloud-providers/google/storage-access.md
@@ -0,0 +1,41 @@
+---
+title: "Google Cloud storage access"
+description: "Cloud Storage bucket creation and permissions for Seqera Platform on Google Cloud."
+tags: [gcp, google, storage, gcs, integration]
+---
+
+Google Cloud Storage is a type of **object storage**. Create a Cloud Storage bucket that your Seqera service account can access to use as the Nextflow work directory and to store input/output data.
+
+## Create a Cloud Storage bucket
+
+1. In the Google Cloud Console hamburger menu (**≡**), select **Cloud Storage**.
+1. From the **Buckets** tab, select **Create**.
+1. Enter a name for your bucket. You will reference this name in the Seqera compute environment.
+1. Select **Region** for the **Location type** and select the **Location** for your bucket. You'll reference this location in the Seqera compute environment.
+
+ :::note
+ The Batch API is available in a [limited number of locations](https://cloud.google.com/batch/docs/locations). These locations are only used to store metadata about the pipeline operations — the storage bucket and compute resources can be in any region.
+ :::
+
+1. Select **Standard** as the default storage class.
+1. To restrict public access, select the **Enforce public access prevention on this bucket** checkbox.
+1. Under **Access control**, select **Uniform**.
+1. Select any additional object data protection tools per your organization's data protection requirements.
+1. Select **Create**.
+
+## Assign bucket permissions
+
+After the bucket is created, you are redirected to the **Bucket details** page.
+
+1. Select **Permissions**, then **Grant access** under **View by principals**.
+1. Copy the email address of your service account into **New principals**.
+1. Select the **Storage Admin** role, then select **Save**.
+
+:::tip
+You've created a project, enabled the necessary Google APIs, created a bucket, and created a service account JSON key file with the required credentials. You now have what you need to set up a new compute environment in Seqera.
+:::
+
+## Next steps
+
+- [Add credentials in Seqera](./credentials).
+- Create the [Google Cloud Batch](/platform-cloud/compute-envs/google-cloud-batch) or [Google Cloud](/platform-cloud/compute-envs/google-cloud) compute environment.
diff --git a/platform-cloud/docs/integrations/cloud-providers/overview.md b/platform-cloud/docs/integrations/cloud-providers/overview.md
new file mode 100644
index 000000000..39785b1e3
--- /dev/null
+++ b/platform-cloud/docs/integrations/cloud-providers/overview.md
@@ -0,0 +1,27 @@
+---
+title: "Cloud providers"
+description: "IAM, credentials, and data access setup for AWS, Azure, and Google Cloud."
+tags: [cloud, aws, azure, gcp, integration]
+---
+
+Seqera Platform supports three cloud providers as compute backends. Each section here is the canonical reference for the IAM/role policies and credentials Seqera needs in that cloud account, independent of which compute environment type (Batch, Cloud, EKS) you create.
+
+## Choose your cloud
+
+| Cloud | Compute environment options | Setup |
+|---|---|---|
+| **AWS** | AWS Batch, AWS Cloud, Amazon EKS | [AWS integration](./aws/overview) |
+| **Azure** | Azure Batch, Azure Cloud | [Azure integration](./azure/overview) |
+| **Google Cloud** | Google Cloud Batch, Google Cloud | [Google Cloud integration](./google/overview) |
+
+## What each section covers
+
+Each cloud section contains:
+
+- **Overview** — when to use each compute environment type for that cloud, and the credential models supported.
+- **IAM policies / roles** — the canonical permissions Seqera Platform needs in your cloud account.
+- **Credentials** — how to create IAM users, IAM roles, service principals, or service account keys, and how to add them to Seqera.
+- **Data access / Storage** — bucket / container / file system prerequisites for the work directory.
+- **Manual setup** (AWS and Azure only) — how to create cloud-side resources manually instead of letting Seqera Forge create them.
+
+After completing the cloud-side setup, create the compute environment in Seqera using the relevant guide under [Compute](/platform-cloud/compute-envs/overview).
diff --git a/platform-cloud/docs/credentials/aws_registry_credentials.md b/platform-cloud/docs/integrations/container-registries/aws.md
similarity index 100%
rename from platform-cloud/docs/credentials/aws_registry_credentials.md
rename to platform-cloud/docs/integrations/container-registries/aws.md
diff --git a/platform-cloud/docs/credentials/azure_registry_credentials.md b/platform-cloud/docs/integrations/container-registries/azure.md
similarity index 100%
rename from platform-cloud/docs/credentials/azure_registry_credentials.md
rename to platform-cloud/docs/integrations/container-registries/azure.md
diff --git a/platform-cloud/docs/credentials/docker_hub_registry_credentials.md b/platform-cloud/docs/integrations/container-registries/docker-hub.md
similarity index 100%
rename from platform-cloud/docs/credentials/docker_hub_registry_credentials.md
rename to platform-cloud/docs/integrations/container-registries/docker-hub.md
diff --git a/platform-cloud/docs/credentials/gitea_registry_credentials.md b/platform-cloud/docs/integrations/container-registries/gitea.md
similarity index 100%
rename from platform-cloud/docs/credentials/gitea_registry_credentials.md
rename to platform-cloud/docs/integrations/container-registries/gitea.md
diff --git a/platform-cloud/docs/credentials/github_registry_credentials.md b/platform-cloud/docs/integrations/container-registries/github.md
similarity index 100%
rename from platform-cloud/docs/credentials/github_registry_credentials.md
rename to platform-cloud/docs/integrations/container-registries/github.md
diff --git a/platform-cloud/docs/credentials/gitlab_registry_credentials.md b/platform-cloud/docs/integrations/container-registries/gitlab.md
similarity index 100%
rename from platform-cloud/docs/credentials/gitlab_registry_credentials.md
rename to platform-cloud/docs/integrations/container-registries/gitlab.md
diff --git a/platform-cloud/docs/credentials/google_registry_credentials.md b/platform-cloud/docs/integrations/container-registries/google.md
similarity index 100%
rename from platform-cloud/docs/credentials/google_registry_credentials.md
rename to platform-cloud/docs/integrations/container-registries/google.md
diff --git a/platform-cloud/docs/integrations/container-registries/overview.md b/platform-cloud/docs/integrations/container-registries/overview.md
new file mode 100644
index 000000000..c6825aa3d
--- /dev/null
+++ b/platform-cloud/docs/integrations/container-registries/overview.md
@@ -0,0 +1,32 @@
+---
+title: "Container registry credentials"
+description: "Configure container registry credentials for the Wave container service in Seqera Platform."
+date created: "2025-10-21"
+last updated: "2025-10-21"
+tags: [credentials, container-registry, wave, docker, authentication]
+---
+
+Seqera Platform supports the configuration of credentials for the Wave container service to authenticate to private and public container registries. For more information about Wave, see [Wave containers](https://docs.seqera.io/wave).
+
+:::note
+Container registry credentials are only used by Wave containers. Enable Wave when you create a [compute environment](/platform-cloud/compute-envs/overview) in Seqera, or add `wave { enabled=true }` to your `nextflow.config` file, for your pipeline execution to use Wave containers.
+:::
+
+## Supported container registries
+
+Seqera supports credential configuration for the following container registries. Select a registry below for detailed instructions on how to create and configure credentials for that provider:
+
+- [AWS ECR credentials](./aws.md)
+- [Azure container registry credentials](./azure.md)
+- [Docker Hub credentials](./docker-hub.md)
+- [Gitea container registry credentials](./gitea.md)
+- [GitHub container registry credentials](./github.md)
+- [GitLab container registry credentials](./gitlab.md)
+- [Google registry credentials](./google.md)
+- [Quay container registry credentials](./quay.md)
+
+## Next steps
+
+- Learn more about [Wave containers](https://docs.seqera.io/wave/provisioning).
+- Configure [compute environment credentials](/platform-cloud/compute-envs/overview) for your pipeline infrastructure.
+- Set up [data repository credentials](/platform-cloud/integrations/data-sources/overview) to access cloud storage.
diff --git a/platform-cloud/docs/credentials/quay_registry_credentials.md b/platform-cloud/docs/integrations/container-registries/quay.md
similarity index 100%
rename from platform-cloud/docs/credentials/quay_registry_credentials.md
rename to platform-cloud/docs/integrations/container-registries/quay.md
diff --git a/platform-cloud/docs/credentials/data_repositories.md b/platform-cloud/docs/integrations/data-sources/overview.md
similarity index 100%
rename from platform-cloud/docs/credentials/data_repositories.md
rename to platform-cloud/docs/integrations/data-sources/overview.md
diff --git a/platform-cloud/docs/integrations/git-providers/azure-devops.md b/platform-cloud/docs/integrations/git-providers/azure-devops.md
new file mode 100644
index 000000000..5fa5a462f
--- /dev/null
+++ b/platform-cloud/docs/integrations/git-providers/azure-devops.md
@@ -0,0 +1,17 @@
+---
+title: "Azure DevOps"
+description: "Connect Seqera Platform to private Azure DevOps repositories."
+tags: [git, azure, azure-devops, integration]
+---
+
+Authenticate to Azure DevOps repositories using a [personal access token (PAT)](https://learn.microsoft.com/en-us/azure/devops/organizations/accounts/use-personal-access-tokens-to-authenticate?view=azure-devops&tabs=Windows#about-pats).
+
+## Create Azure DevOps credentials
+
+After creating and copying your access token, create a new credential in Seqera:
+
+1. From an organization workspace: Select **Credentials** > **Add Credentials**. From your personal workspace: Go to the user menu and select **Your credentials** > **Add credentials**.
+1. Enter a **Name** for the new credentials.
+1. Select **Azure DevOps** as the **Provider**.
+1. Enter your **Username** and **Access token**.
+1. (Recommended) Enter the **Repository base URL** to scope the credentials to a specific repository (e.g., `https://dev.azure.com/<organization>/<project>`).
diff --git a/platform-cloud/docs/integrations/git-providers/bitbucket.md b/platform-cloud/docs/integrations/git-providers/bitbucket.md
new file mode 100644
index 000000000..b0b925a35
--- /dev/null
+++ b/platform-cloud/docs/integrations/git-providers/bitbucket.md
@@ -0,0 +1,21 @@
+---
+title: "Bitbucket"
+description: "Connect Seqera Platform to private Bitbucket repositories."
+tags: [git, bitbucket, integration]
+---
+
+To connect to a private Bitbucket repository, see [API tokens](https://support.atlassian.com/bitbucket-cloud/docs/api-tokens/) to create a Bitbucket API token. The API token must have at least `read:repository:bitbucket` scope.
+
+:::warning
+API tokens are tied to users. This differs from access tokens, which are tied to a specific resource. While Seqera supports API tokens, access tokens are not supported for accessing Bitbucket repositories.
+
+API tokens replace [app passwords](https://support.atlassian.com/bitbucket-cloud/docs/app-passwords/), which can no longer be created after September 9, 2025 and will be phased out June 9, 2026. While app passwords are still supported, they are not recommended. See [Bitbucket Cloud transitions to API tokens](https://www.atlassian.com/blog/bitbucket/bitbucket-cloud-transitions-to-api-tokens-enhancing-security-with-app-password-deprecation) for more information.
+:::
+
+## Create Bitbucket credentials
+
+1. From an organization workspace: Select **Credentials** > **Add Credentials**. From your personal workspace: Go to the user menu and select **Your credentials** > **Add credentials**.
+1. Enter a **Name** for the new credentials.
+1. Select **BitBucket** as the **Provider**.
+1. Enter your **Username** (account email) and **Token**.
+1. (Recommended) Enter the **Repository base URL** to scope the credentials to a specific repository (e.g., `https://bitbucket.org/seqeralabs`).
diff --git a/platform-cloud/docs/integrations/git-providers/codecommit.md b/platform-cloud/docs/integrations/git-providers/codecommit.md
new file mode 100644
index 000000000..06bc90f4c
--- /dev/null
+++ b/platform-cloud/docs/integrations/git-providers/codecommit.md
@@ -0,0 +1,17 @@
+---
+title: "AWS CodeCommit"
+description: "Connect Seqera Platform to private AWS CodeCommit repositories."
+tags: [git, aws, codecommit, integration]
+---
+
+To connect to a private AWS CodeCommit repository, see the [AWS documentation](https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-identity-based-access-control.html) for IAM permissions for CodeCommit.
+
+## Create AWS CodeCommit credentials
+
+Use your IAM account access key and secret key:
+
+1. From an organization workspace: Select **Credentials** > **Add Credentials**. From your personal workspace: Go to the user menu and select **Your credentials** > **Add credentials**.
+1. Enter a **Name** for the new credentials.
+1. Select **CodeCommit** as the **Provider**.
+1. Enter the **Access key** and **Secret key** of the AWS IAM account.
+1. (Recommended) Enter the **Repository base URL** to scope the credentials to a specific region (e.g., `https://git-codecommit.eu-west-1.amazonaws.com`).
diff --git a/platform-cloud/docs/integrations/git-providers/gitea.md b/platform-cloud/docs/integrations/git-providers/gitea.md
new file mode 100644
index 000000000..ccf023c02
--- /dev/null
+++ b/platform-cloud/docs/integrations/git-providers/gitea.md
@@ -0,0 +1,16 @@
+---
+title: "Gitea"
+description: "Connect Seqera Platform to private Gitea repositories."
+tags: [git, gitea, integration]
+---
+
+To connect to a private [Gitea](https://gitea.io/) repository, use your Gitea user credentials.
+
+## Create Gitea credentials
+
+1. From an organization workspace: Select **Credentials** > **Add Credentials**. From your personal workspace: Go to the user menu and select **Your credentials** > **Add credentials**.
+1. Enter a **Name** for the new credentials.
+1. Select **Gitea** as the **Provider**.
+1. Enter your **Username**.
+1. Enter your **Password**.
+1. Enter your **Repository base URL** (required).
diff --git a/platform-cloud/docs/integrations/git-providers/github.md b/platform-cloud/docs/integrations/git-providers/github.md
new file mode 100644
index 000000000..fb9f7521c
--- /dev/null
+++ b/platform-cloud/docs/integrations/git-providers/github.md
@@ -0,0 +1,25 @@
+---
+title: "GitHub"
+description: "Connect Seqera Platform to private GitHub repositories."
+tags: [git, github, integration]
+---
+
+Use an access token to connect Seqera Platform to a private [GitHub](https://github.com/) repository. Personal (classic) or fine-grained access tokens can be used.
+
+:::info
+A user's personal access token (classic) can access every repository the user has access to. GitHub recommends using fine-grained personal access tokens (currently in beta), which can be restricted to specific repositories with granular permissions instead of broad scopes.
+:::
+
+For personal (classic) tokens, grant access to the private repository by selecting the main `repo` scope when the token is created. See [Creating a personal access token (classic)](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token#creating-a-personal-access-token-classic).
+
+For fine-grained tokens, the repository's organization must [opt in](https://docs.github.com/en/organizations/managing-programmatic-access-to-your-organization/setting-a-personal-access-token-policy-for-your-organization) to fine-grained tokens. See [Creating a fine-grained personal access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-fine-grained-personal-access-token).
+
+## Create GitHub credentials
+
+After you've created and copied your access token, create a new credential in Seqera:
+
+1. From an organization workspace: Select **Credentials** > **Add Credentials**. From your personal workspace: Go to the user menu and select **Your credentials** > **Add credentials**.
+1. Enter a **Name** for the new credentials.
+1. Select **GitHub** as the **Provider**.
+1. Enter your **Username** and **Access token**.
+1. (Recommended) Enter the **Repository base URL** for which the credentials should be applied (e.g., `https://github.com/seqeralabs`).
diff --git a/platform-cloud/docs/integrations/git-providers/gitlab.md b/platform-cloud/docs/integrations/git-providers/gitlab.md
new file mode 100644
index 000000000..a04a94b61
--- /dev/null
+++ b/platform-cloud/docs/integrations/git-providers/gitlab.md
@@ -0,0 +1,16 @@
+---
+title: "GitLab"
+description: "Connect Seqera Platform to private GitLab repositories."
+tags: [git, gitlab, integration]
+---
+
+GitLab supports [Personal](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html), [Group](https://docs.gitlab.com/ee/user/group/settings/group_access_tokens.html#group-access-tokens), and [Project](https://docs.gitlab.com/ee/user/project/settings/project_access_tokens.html) access tokens for authentication. Your access token must have the `api`, `read_api`, and `read_repository` scopes. For all three token types, use the token value in both the **Password** and **Access token** fields.
+
+## Create GitLab credentials
+
+1. From an organization workspace: Select **Credentials** > **Add Credentials**. From your personal workspace: Go to the user menu and select **Your credentials** > **Add credentials**.
+1. Enter a **Name** for the new credentials.
+1. Select **GitLab** as the **Provider**.
+1. Enter your **Username**. For Group and Project access tokens, the username can be any non-empty value.
+1. Enter your token value in both the **Password** and **Access token** fields.
+1. (Recommended) Enter the **Repository base URL** to scope the credentials to a specific repository (e.g., `https://gitlab.com/seqeralabs`).
diff --git a/platform-cloud/docs/integrations/git-providers/multiple-credentials.md b/platform-cloud/docs/integrations/git-providers/multiple-credentials.md
new file mode 100644
index 000000000..6079d801e
--- /dev/null
+++ b/platform-cloud/docs/integrations/git-providers/multiple-credentials.md
@@ -0,0 +1,42 @@
+---
+title: "Multiple credentials"
+description: "How Seqera Platform selects the right Git credential when multiple are configured."
+tags: [git, credentials, integration]
+---
+
+When you have multiple stored Git credentials, Seqera selects the most relevant credential for your repository in the following order:
+
+1. Seqera evaluates all stored credentials available to the current workspace.
+1. Credentials are filtered by Git provider (GitHub, GitLab, Bitbucket, etc.).
+1. Seqera selects the credential whose Repository base URL most closely matches the target repository URL.
+1. If no Repository base URL values are specified in the workspace credentials, the longest-lived credential is selected.
+
+## Credential filtering example
+
+Workspace A contains four credentials:
+
+**Credential A**
+- Type: GitHub
+- Repository base URL:
+
+**Credential B**
+- Type: GitHub
+- Repository base URL: `https://github.com/`
+
+**Credential C**
+- Type: GitHub
+- Repository base URL: `https://github.com/pipeline-repo`
+
+**Credential D**
+- Type: GitLab
+- Repository base URL: `https://gitlab.com/repo-a`
+
+If you launch a pipeline with a Nextflow workflow in `https://github.com/pipeline-repo`, Seqera uses Credential C.
+
+## Recommendations
+
+For Seqera to select the most appropriate credential for your repository:
+
+- Specify the Repository base URL values as completely as possible for each Git credential used in the workspace.
+- Favor service account-type credentials where possible (such as GitLab group access tokens).
+- Avoid storing multiple user-based tokens with similar permissions.
diff --git a/platform-cloud/docs/integrations/git-providers/overview.md b/platform-cloud/docs/integrations/git-providers/overview.md
new file mode 100644
index 000000000..c58356d9d
--- /dev/null
+++ b/platform-cloud/docs/integrations/git-providers/overview.md
@@ -0,0 +1,44 @@
+---
+title: "Git providers"
+description: "Connect Seqera Platform to GitHub, GitLab, Bitbucket, Gitea, Azure DevOps, and AWS CodeCommit."
+tags: [git, integration]
+---
+
+Data pipelines are composed of many assets — pipeline scripts, configuration files, dependency descriptors (Conda, Docker), documentation, etc. When you manage these as Git repositories, all assets can be versioned and deployed with a specific tag, release, or commit ID. Version control and containerization are crucial for reproducible pipeline executions and continuous validation as the code evolves.
+
+Seqera Platform has built-in support for [Git](https://git-scm.com) and the most popular Git-hosting platforms.
+
+## Public repositories
+
+Launch a public Nextflow pipeline by entering its Git repository URL in the **Pipeline to launch** field.
+
+When you specify the **Revision** number, the available revisions are pulled using the Git provider's API. If no revision is specified, the repository's default branch (usually `main` or `master`) is used.
+
+:::tip
+[nf-core](https://nf-co.re/pipelines) is a great resource for public Nextflow pipelines.
+:::
+
+:::info
+The GitHub API imposes [rate limits](https://docs.github.com/en/developers/apps/building-github-apps/rate-limits-for-github-apps) on API requests. Increase your rate limit by adding [GitHub credentials](./github) to your workspace.
+:::
+
+## Private repositories
+
+To access private Nextflow pipelines, add credentials for your private Git hosting provider:
+
+- [GitHub](./github)
+- [GitLab](./gitlab)
+- [Bitbucket](./bitbucket)
+- [Gitea](./gitea)
+- [Azure DevOps](./azure-devops)
+- [AWS CodeCommit](./codecommit)
+
+If you use multiple Git credentials in the same workspace, see [Multiple credential filtering](./multiple-credentials) for the credential selection rules.
+
+:::info
+Credentials are encrypted with AES-256 before secure storage and are never exposed in an unencrypted way by any Seqera API.
+:::
+
+## Seqera AI Git access
+
+Seqera AI's GitHub integration is set up separately. See [Seqera AI Git access](./seqera-ai).
diff --git a/platform-cloud/docs/integrations/git-providers/seqera-ai.md b/platform-cloud/docs/integrations/git-providers/seqera-ai.md
new file mode 100644
index 000000000..cc45ae587
--- /dev/null
+++ b/platform-cloud/docs/integrations/git-providers/seqera-ai.md
@@ -0,0 +1,38 @@
+---
+title: "Seqera AI Git access"
+description: "Configure GitHub access for Seqera AI."
+tags: [git, github, seqera-ai, integration]
+---
+
+[Seqera AI](https://seqera.io/ask-ai/chat-v2) integrates with your pipeline GitHub repositories to provide intelligent assistance with pipeline development and modification. To get the most out of Seqera AI, grant it access to your pipeline codebase so it can analyze your code, suggest changes, and create pull requests on your behalf.
+
+This integration is **separate** from Seqera Platform Git credentials — they are configured independently in different products.
+
+## Set up GitHub access
+
+To enable Seqera AI to interact with your pipeline GitHub repositories:
+
+1. **Generate a personal access token**
+ - Navigate to [GitHub Personal Access Tokens](https://github.com/settings/personal-access-tokens).
+ - Create a new token with the following permissions:
+ - **Pull Requests**: Read & Write
+ - **Contents**: Read & Write
+ - Your token value is displayed only once. Copy it before navigating away from the tokens page.
+
+1. **Add the token to Seqera AI**
+ - Open [Seqera AI](https://seqera.io/ask-ai/chat-v2).
+ - In the bottom-left user menu, select **Add token**.
+ - Enter your personal access token, then select **Set token**.
+
+## Capabilities
+
+With proper GitHub access configured, Seqera AI can:
+
+- Access and analyze your pipeline codebase
+- Create feature branches for proposed changes
+- Generate pull requests for your review
+- Suggest improvements based on your existing code patterns
+
+:::tip
+Seqera AI respects your repository's branch protection rules and creates pull requests for review rather than directly modifying protected branches.
+:::
diff --git a/platform-cloud/docs/credentials/managed_identities.md b/platform-cloud/docs/integrations/identity/managed-identities.md
similarity index 100%
rename from platform-cloud/docs/credentials/managed_identities.md
rename to platform-cloud/docs/integrations/identity/managed-identities.md
diff --git a/platform-cloud/docs/credentials/ssh_credentials.md b/platform-cloud/docs/integrations/identity/ssh-credentials.md
similarity index 98%
rename from platform-cloud/docs/credentials/ssh_credentials.md
rename to platform-cloud/docs/integrations/identity/ssh-credentials.md
index dcc3b1873..8cf1f5703 100644
--- a/platform-cloud/docs/credentials/ssh_credentials.md
+++ b/platform-cloud/docs/integrations/identity/ssh-credentials.md
@@ -8,7 +8,7 @@ tags: [ssh, credentials]
SSH public key authentication relies on asymmetric cryptography to generate a public and private key pair. The public key remains on the target (remote) machine, while the private key (and passphrase) is stored in Seqera Platform as a credential. The key pair is used to authenticate a connection with your SSH-enabled environment.
-To preserve individual user identities by using multiple user SSH credentials to access your HPC compute environments, see [Managed identities](./managed_identities).
+To preserve individual user identities by using multiple user SSH credentials to access your HPC compute environments, see [Managed identities](./managed-identities).
:::note
All credentials are (AES-256) encrypted before secure storage and not exposed in an unencrypted way by any Seqera API.
diff --git a/platform-cloud/docs/integrations/overview.md b/platform-cloud/docs/integrations/overview.md
new file mode 100644
index 000000000..b04721f73
--- /dev/null
+++ b/platform-cloud/docs/integrations/overview.md
@@ -0,0 +1,44 @@
+---
+title: "Integrations overview"
+description: "How Seqera Platform connects to external systems and what permissions it needs."
+tags: [integrations, credentials]
+---
+
+Seqera Platform connects to a range of external systems — cloud providers, Git hosts, container registries, data sources, and HPC schedulers — to run pipelines, manage data, and orchestrate compute.
+
+This section is the single home for **how to set those integrations up** and **what permissions each one needs**.
+
+## Where to start
+
+| If you want to... | Go to |
+|---|---|
+| Run pipelines on **AWS Batch**, **AWS Cloud**, or **Amazon EKS** | [AWS](./cloud-providers/aws/overview) |
+| Run pipelines on **Azure Batch** or **Azure Cloud** | [Azure](./cloud-providers/azure/overview) |
+| Run pipelines on **Google Cloud Batch** or **Google Cloud** | [Google Cloud](./cloud-providers/google/overview) |
+| Pull pipelines from a **private Git repository** | [Git providers](./git-providers/overview) |
+| Pull container images from a **private registry** (Wave) | [Container registries](./container-registries/overview) |
+| Connect to **cloud storage** (S3 / Blob / GCS) for data | [Data sources](./data-sources/overview) |
+| Use **managed identity** or **SSH** for compute access | [Identity & access](./identity/managed-identities) |
+| Set up the **Seqera Agent** for hybrid or on-premises | [Agent credentials](./agent/credentials) |
+
+## How credentials are stored
+
+:::note
+Seqera Platform encrypts all credentials with AES-256 encryption before storing them. No Seqera API exposes credentials in an unencrypted way.
+:::
+
+## Cloud provider integrations
+
+The cloud provider sections cover IAM/role policies, credential creation, data access, and provider-specific extras. They are the canonical reference for the permissions Seqera needs in each cloud account.
+
+- [AWS](./cloud-providers/aws/overview) — IAM policies, IAM user/role, S3/EFS/FSx, EKS additions, manual Batch setup
+- [Azure](./cloud-providers/azure/overview) — custom roles, service principals, Storage and Batch accounts, manual Batch setup
+- [Google Cloud](./cloud-providers/google/overview) — service account IAM, JSON keys, Cloud Storage
+
+## Other integrations
+
+- [Git providers](./git-providers/overview) — GitHub, GitLab, Bitbucket, Gitea, Azure DevOps, AWS CodeCommit, plus Seqera AI Git access.
+- [Container registries](./container-registries/overview) — Docker Hub, AWS ECR, Azure ACR, Google Artifact Registry, Quay, GitHub/GitLab/Gitea registries.
+- [Data sources](./data-sources/overview) — S3, Azure Blob, GCS, S3-compatible.
+- [Identity & access](./identity/managed-identities) — Azure managed identities for compute environments, [SSH credentials](./identity/ssh-credentials) for HPC.
+- [Agent](./agent/credentials) — Seqera Agent authentication for hybrid and on-premises deployments.
diff --git a/platform-cloud/docs/launch/cache-resume.mdx b/platform-cloud/docs/launch/cache-resume.mdx
index b1d18e65f..b917a786d 100644
--- a/platform-cloud/docs/launch/cache-resume.mdx
+++ b/platform-cloud/docs/launch/cache-resume.mdx
@@ -107,7 +107,7 @@ Kubernetes compute environments do not use cloud cache by default. To specify a
Google Cloud Storage
- 1. See [these instructions](../compute-envs/google-cloud-batch#iam) to set up IAM and create a JSON key file for the custom service account with permissions to your Google Cloud storage account.
+ 1. See [these instructions](/platform-cloud/integrations/cloud-providers/google/iam) to set up IAM and create a JSON key file for the custom service account with permissions to your Google Cloud storage account.
2. If you run the [gcloud CLI authentication flow](https://docs.seqera.io/nextflow/google#credentials) with `gcloud auth application-default login`, your Application Default Credentials are written to `$HOME/.config/gcloud/application_default_credentials.json` and picked up by Nextflow automatically. Otherwise, declare the `GOOGLE_APPLICATION_CREDENTIALS` environment variable explicitly with the local path to your service account credentials file created in the previous step.
3. Add the following to the **Nextflow Config file** field when you [launch](../launch/launchpad#launch-form) your pipeline:
diff --git a/platform-cloud/docs/launch/launchpad.md b/platform-cloud/docs/launch/launchpad.md
index ce19ad572..459e68e11 100644
--- a/platform-cloud/docs/launch/launchpad.md
+++ b/platform-cloud/docs/launch/launchpad.md
@@ -31,9 +31,9 @@ The launch form accepts URL query parameters. See [Populate launch form with URL
### General config
-- **Pipeline to launch**: A Git repository name or URL. For saved pipelines, this is prefilled and cannot be edited. Private repositories require [access credentials](../credentials/overview).
+- **Pipeline to launch**: A Git repository name or URL. For saved pipelines, this is prefilled and cannot be edited. Private repositories require [access credentials](/platform-cloud/integrations/overview).
:::note
- Nextflow pipelines are Git repositories that can reside on any public or private Git-hosting platform. See [Git integration](../git/overview) in the Seqera docs and [Pipeline sharing](https://docs.seqera.io/nextflow/sharing) in the Nextflow docs for more details.
+ Nextflow pipelines are Git repositories that can reside on any public or private Git-hosting platform. See [Git integration](/platform-cloud/integrations/git-providers/overview) in the Seqera docs and [Pipeline sharing](https://docs.seqera.io/nextflow/sharing) in the Nextflow docs for more details.
:::
- **Revision**: A valid repository commit ID, tag, or branch name. Determines the version of the pipeline to launch.
- **Commit ID**: Pin pipeline revision to the most recent HEAD commit ID. If no commit ID is pinned, the latest revision of the repository branch or tag is used.
diff --git a/platform-cloud/docs/orgs-and-teams/workspace-management.md b/platform-cloud/docs/orgs-and-teams/workspace-management.md
index 74dd78624..6ba70e8f3 100644
--- a/platform-cloud/docs/orgs-and-teams/workspace-management.md
+++ b/platform-cloud/docs/orgs-and-teams/workspace-management.md
@@ -31,7 +31,7 @@ Organization owners and admins can create a new workspace within an organization
As a workspace owner, you can modify optional workspace fields after workspace creation. You can either select **Edit** on an organization's workspaces list or the **Settings** tab within the workspace page.
:::
-Apart from the **Participants** tab, the _organization_ workspace is similar to the _user_ workspace. As such, the relation to [runs](../launch/launchpad), [actions](../pipeline-actions/overview), [compute environments](../compute-envs/overview), and [credentials](../credentials/overview) is the same.
+Apart from the **Participants** tab, the _organization_ workspace is similar to the _user_ workspace. As such, the relation to [runs](../launch/launchpad), [actions](../pipeline-actions/overview), [compute environments](../compute-envs/overview), and [credentials](/platform-cloud/integrations/overview) is the same.
## Workspace settings
diff --git a/platform-cloud/docs/quickstart.md b/platform-cloud/docs/quickstart.md
index 410168d32..b839fca2a 100644
--- a/platform-cloud/docs/quickstart.md
+++ b/platform-cloud/docs/quickstart.md
@@ -20,7 +20,7 @@ When you first log in after verifying your email, Seqera automatically creates a
Seqera provisions four types of resources to get you started:
- A [Seqera Compute environment](./compute-envs/seqera-compute.md) with $100 in free credits
-- [Credentials](./credentials/overview.md) used by your compute environment to create and manage cloud resources on your behalf
+- [Credentials](/platform-cloud/integrations/overview) used by your compute environment to create and manage cloud resources on your behalf
- A cloud storage bucket in [Data Explorer](./data/data-explorer.md)
- Pre-configured nf-core pipelines, ready to launch
diff --git a/platform-cloud/docs/resource-labels/overview.md b/platform-cloud/docs/resource-labels/overview.md
index aa611fcd9..b17f082b5 100644
--- a/platform-cloud/docs/resource-labels/overview.md
+++ b/platform-cloud/docs/resource-labels/overview.md
@@ -165,7 +165,7 @@ The following resources are tagged using the resource labels associated with the
At execution time, when jobs are submitted to Batch, the requests are set up to propagate tags to all the instances and volumes created by the head job.
-The [IAM permissions](../compute-envs/aws-batch.md#required-platform-iam-permissions) contain the roles needed for Batch Forge-created AWS Batch compute environments to tag AWS resources. Specifically, the required roles are `iam:TagRole`, `iam:TagInstanceProfile`, and `batch:TagResource`.
+The [AWS IAM policies](/platform-cloud/integrations/cloud-providers/aws/iam-policies) contain the roles needed for Batch Forge-created AWS Batch compute environments to tag AWS resources. Specifically, the required roles are `iam:TagRole`, `iam:TagInstanceProfile`, and `batch:TagResource`.
To view and manage the resource labels applied to AWS resources by Seqera and Nextflow, go to the [AWS Tag Editor](https://docs.aws.amazon.com/tag-editor/latest/userguide/find-resources-to-tag.html) (as an administrative user) and follow these steps:
diff --git a/platform-cloud/docs/secrets/overview.md b/platform-cloud/docs/secrets/overview.md
index cb4ec764a..23e5006ff 100644
--- a/platform-cloud/docs/secrets/overview.md
+++ b/platform-cloud/docs/secrets/overview.md
@@ -41,7 +41,7 @@ In AWS Batch compute environments, Seqera passes stored secrets to jobs as part
## AWS Secrets Manager integration
-Seqera and associated AWS Batch IAM Roles require [specific permissions](../compute-envs/aws-batch#pipeline-secrets-optional) to interact with AWS Secrets Manager.
+Seqera and associated AWS Batch IAM Roles require [specific permissions](/platform-cloud/integrations/cloud-providers/aws/iam-policies) to interact with AWS Secrets Manager.
:::note
If you plan to limit the scope of this IAM policy, please ensure that the ListSecrets action remains granted on all resources (`"Resource": "*"`).
diff --git a/platform-cloud/docs/seqera-mcp/overview.md b/platform-cloud/docs/seqera-mcp/overview.md
index cd4792d9e..3245bc73a 100644
--- a/platform-cloud/docs/seqera-mcp/overview.md
+++ b/platform-cloud/docs/seqera-mcp/overview.md
@@ -34,7 +34,7 @@ https://mcp.seqera.io/mcp
Seqera MCP supports two authentication methods:
- **OAuth 2.1** (recommended): Interactive login through Seqera Platform. Your browser opens automatically to authenticate when connecting.
-- **Personal Access Token**: Use your Seqera Platform [access token](https://docs.seqera.io/platform-cloud/credentials/overview) as a Bearer token. Useful for clients that don't support OAuth.
+- **Personal Access Token**: Use your Seqera Platform [access token](https://docs.seqera.io/platform-cloud/integrations/overview) as a Bearer token. Useful for clients that don't support OAuth.
## Client setup
diff --git a/platform-cloud/docs/supported_software/agent/overview.md b/platform-cloud/docs/supported_software/agent/overview.md
index 440a7e6b0..63259190d 100644
--- a/platform-cloud/docs/supported_software/agent/overview.md
+++ b/platform-cloud/docs/supported_software/agent/overview.md
@@ -29,7 +29,7 @@ Before running the Agent:
1. Create a [**personal access token**](https://docs.seqera.io/platform-api/create-token).
-2. Create [Tower Agent credentials](../../credentials/agent_credentials) in a Seqera Platform workspace.
+2. Create [Tower Agent credentials](/platform-cloud/integrations/agent/credentials) in a Seqera Platform workspace.
:::note
To share a single Tower Agent instance with all members of a workspace, create a Tower Agent credential with **Shared agent** enabled.